diff --git a/go.mod b/go.mod index 055f53036..8ff83caf4 100644 --- a/go.mod +++ b/go.mod @@ -25,13 +25,13 @@ require ( golang.org/x/crypto v0.9.0 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.4.0 - k8s.io/api v0.26.3 - k8s.io/apiextensions-apiserver v0.26.2 + k8s.io/api v0.27.2 + k8s.io/apiextensions-apiserver v0.27.2 k8s.io/apimachinery v0.27.3 k8s.io/cli-runtime v0.26.3 - k8s.io/client-go v0.26.3 + k8s.io/client-go v0.27.2 k8s.io/kubectl v0.26.3 - sigs.k8s.io/controller-runtime v0.14.6 + sigs.k8s.io/controller-runtime v0.15.0 sigs.k8s.io/yaml v1.3.0 ) @@ -122,9 +122,9 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.41.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -148,7 +148,7 @@ require ( golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect gomodules.xyz/jsonpatch/v3 v3.0.1 // indirect gomodules.xyz/orderedmap v0.1.0 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -156,7 +156,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.26.3 // indirect + k8s.io/component-base v0.27.2 // indirect k8s.io/klog/v2 v2.90.1 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect diff --git a/go.sum b/go.sum index cb360833e..8d07905bf 100644 --- a/go.sum +++ b/go.sum @@ -214,7 +214,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -253,7 +252,7 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -529,11 +528,11 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= @@ -561,22 +560,22 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.41.0 h1:npo01n6vUlRViIj5fgwiK8vlNIh8bnoxqh3gypKsyAw= -github.com/prometheus/common v0.41.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -590,7 +589,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -720,7 +719,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -1017,13 +1016,13 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.3.0 
h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gomodules.xyz/jsonpatch/v3 v3.0.1 h1:Te7hKxV52TKCbNYq3t84tzKav3xhThdvSsSp/W89IyI= gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0 h1:fM/+TGh/O1KkqGR5xjTKg6bU8OKBkg7p0Y+x/J9m8Os= @@ -1191,8 +1190,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= -k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= -k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= +k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= +k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= @@ -1202,12 +1201,12 @@ k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= k8s.io/cli-runtime v0.26.3 h1:3ULe0oI28xmgeLMVXIstB+ZL5CTGvWSMVMLeHxitIuc= k8s.io/cli-runtime v0.26.3/go.mod h1:5YEhXLV4kLt/OSy9yQwtSSNZU2Z7aTEYta1A+Jg4VC4= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= -k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= -k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= +k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= +k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= -k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= -k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= +k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo= +k8s.io/component-base v0.27.2/go.mod h1:5UPk7EjfgrfgRIuDBFtsEFAe4DAvP3U+M8RTzoSJkpo= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -1227,8 +1226,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= +sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= 
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index 246c5ea94..2f5616894 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -28,6 +28,8 @@ var ( MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} // MetricsGC allows only GC metrics to be collected from Go runtime. // e.g. go_gc_cycles_automatic_gc_cycles_total + // NOTE: This does not include new class of "/cpu/classes/gc/..." metrics. + // Use custom metric rule to access those. MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} // MetricsMemory allows only memory metrics to be collected from Go runtime. // e.g. go_memory_classes_heap_free_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index a912b75a0..62de4dc59 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -59,6 +59,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -174,16 +186,24 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. +func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. 
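The counter.go hunk above introduces CounterVecOpts and the experimental V2.NewCounterVec constructor. A minimal usage sketch follows; the metric name, the "method" label, and the strings.ToUpper constraint are illustrative choices, not taken from this diff.

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The Constraint normalizes every observed label value, so "get" and
	// "GET" end up in the same time series. Names here are made up.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Example counter for the constrained-label API.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
		},
	})
	prometheus.MustRegister(requests)

	requests.WithLabelValues("get").Inc() // recorded as method="GET"
}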
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 8bc5e44e2..12331542d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,20 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" - "github.com/cespare/xxhash/v2" - "github.com/prometheus/client_golang/prometheus/internal" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -54,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels ConstrainedLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -80,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.constrainedLabels(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. 
That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels { + if !checkLabelName(label.Name) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label.Name) + labelNameSet[label.Name] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 811072cbd..962608f02 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -37,35 +37,35 @@ // // type metrics struct { // cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec +// hdFailures *prometheus.CounterVec // } // // func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m // } // // func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() +// // Create a non-global registry. +// reg := prometheus.NewRegistry() // -// // Create new metrics and register them using the custom registry. -// m := NewMetrics(reg) -// // Set values for the new created metrics. +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. // m.cpuTemp.Set(65.3) // m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 21271a5bb..f1ea6c76f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. 
Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. +func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 3a2d55e84..2d8d9f64f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -23,11 +23,10 @@ import ( "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/client_golang/prometheus/internal" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 4c873a01c..5b69965b2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,10 +22,9 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -469,6 +468,18 @@ type HistogramOpts struct { NativeHistogramMaxZeroThreshold float64 } +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. 
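As the constructors above show, the classic helpers now delegate to the V2 variants by wrapping plain label names in UnconstrainedLabels. A hedged equivalence sketch (metric name and buckets are invented; V2.NewHistogramVec itself appears a little further down in this file):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	opts := prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds", // illustrative name
		Help:    "Example histogram.",
		Buckets: prometheus.DefBuckets,
	}

	// Classic API, unchanged for callers...
	classic := prometheus.NewHistogramVec(opts, []string{"code"})

	// ...is now equivalent to the V2 form with unconstrained labels.
	viaV2 := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
		HistogramOpts:  opts,
		VariableLabels: prometheus.UnconstrainedLabels([]string{"code"}),
	})

	_, _ = classic, viaV2 // only one of them would normally be registered
}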
// @@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == bucketLabel { + if n.Name == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } + h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) + h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -1034,15 +1041,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) }), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c1b8fad36..63ff8683c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,6 +32,78 @@ import ( // create a Desc. type Labels map[string]string +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. 
+type ConstrainedLabel struct { + Name string + Constraint func(string) string +} + +func (cl ConstrainedLabel) Constrain(v string) string { + if cl.Constraint == nil { + return v + } + return cl.Constraint(v) +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + constrainedLabels() ConstrainedLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { + return cls +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. +// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { + constrainedLabels := make([]ConstrainedLabel, len(uls)) + for i, l := range uls { + constrainedLabels[i] = ConstrainedLabel{Name: l} + } + return constrainedLabels +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index b5119c504..07bbc9d76 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -20,11 +20,9 @@ import ( "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 210867816..d3482c40c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou o.apply(rtOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. 
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - addWithExemplar( - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - 1, - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) } return resp, err } @@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT o.apply(rtOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - observeWithExemplar( - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - time.Since(start).Seconds(), - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) } return resp, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index cca67a78a..3793036ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { @@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } @@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } @@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - observeWithExemplar( - obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, status, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) }) next.ServeHTTP(d, r) } @@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } @@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(d.Written()), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index c590d912c..5d4383aa1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -24,14 +24,32 @@ type Option interface { apply(*options) } +// LabelValueFromCtx are used to compute the label value from request context. +// Context can be filled with values from request through middleware. +type LabelValueFromCtx func(ctx context.Context) string + // options store options for both a handler or round tripper. type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraLabelsFromCtx map[string]LabelValueFromCtx } func defaultOptions() *options { - return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} + return &options{ + getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, + extraLabelsFromCtx: map[string]LabelValueFromCtx{}, + } +} + +func (o *options) emptyDynamicLabels() prometheus.Labels { + labels := prometheus.Labels{} + + for label := range o.extraLabelsFromCtx { + labels[label] = "" + } + + return labels } type optionApplyFunc func(*options) @@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option { }) } -// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. -// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric -// will get instrumented without exemplar. +// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. 
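The promhttp hunks above thread extraLabelsFromCtx through each instrumentation helper, and the WithLabelFromCtx option that populates it is added just below in option.go. A minimal sketch of wiring a context-derived "tenant" label; the context key, middleware, and metric name are assumptions for illustration only.

package main

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey string // hypothetical context key for this sketch

func main() {
	reg := prometheus.NewRegistry()
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_http_requests_total", // illustrative name
		Help: "Example counter partitioned by a context-derived label.",
	}, []string{"code", "method", "tenant"})
	reg.MustRegister(requests)

	handler := promhttp.InstrumentHandlerCounter(
		requests,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		// Resolve the "tenant" label from the request context at observe time.
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if v, ok := ctx.Value(ctxKey("tenant")).(string); ok {
				return v
			}
			return "unknown"
		}),
	)

	// Some middleware upstream would normally put the value into the context.
	http.Handle("/", middleware(handler))
	_ = http.ListenAndServe(":8080", nil)
}

// middleware is a stand-in for whatever populates the context value.
func middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), ctxKey("tenant"), "acme")
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}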
+// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but +// metric will continue to observe/increment. func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { return optionApplyFunc(func(o *options) { o.getExemplarFn = getExemplarFn }) } + +// WithLabelFromCtx registers a label for dynamic resolution with access to context. +// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage +func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { + return optionApplyFunc(func(o *options) { + o.extraLabelsFromCtx[name] = valueFn + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 09e34d307..44da9433b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -21,18 +21,17 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -933,6 +932,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -962,7 +965,7 @@ func checkDescConsistency( copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), + Name: proto.String(l.Name), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 7bc448a89..dd359264e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,10 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" ) // quantileLabel is used for the label that defines the quantile in a @@ -148,6 +147,18 @@ type SummaryOpts struct { BufCap uint32 } +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // Problem with the sliding-window decay algorithm... 
The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging @@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == quantileLabel { + if n.Name == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -530,20 +541,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile” is an illegal // label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. +func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) }), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index f28a76f3a..52344fef5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -23,7 +23,9 @@ type Timer struct { } // NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the +// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar +// later on will be also supported. +// Timer is usually used to time a function call in the // following way: // // func TimeMe() { @@ -31,6 +33,14 @@ type Timer struct { // defer timer.ObserveDuration() // // Do actual work. // } +// +// or +// +// func TimeMeWithExemplar() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDurationWithExemplar(exemplar) +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), @@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration { } return d } + +// ObserveDurationWithExemplar is like ObserveDuration, but it will also +// observe exemplar with the duration unless exemplar is nil or provided Observer can't +// be casted to ExemplarObserver. 
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { + d := time.Since(t.begin) + eo, ok := t.observer.(ExemplarObserver) + if ok && exemplar != nil { + eo.ObserveWithExemplar(d.Seconds(), exemplar) + return d + } + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 2d3abc1cb..5f6bb8001 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,13 +19,11 @@ import ( "time" "unicode/utf8" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l.Name), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7ae322590..386fb2d23 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -72,6 +72,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +92,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return false @@ -106,6 +108,7 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. 
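For the exemplar-aware Timer helper added in timer.go above, a short sketch; the histogram name and the trace_id exemplar label are illustrative.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var opDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "example_op_duration_seconds", // illustrative name
	Help: "Example histogram observed through a Timer.",
})

func timedOp(traceID string) {
	timer := prometheus.NewTimer(opDuration)
	// Falls back to a plain Observe if the Observer does not support
	// exemplars or if the exemplar is nil.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(opDuration)
	timedOp("abc123")
}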
func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels = constrainLabels(m.desc, labels) return m.metricMap.deleteByLabels(labels, m.curry) } @@ -145,10 +148,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", label.Name) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -156,7 +159,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -199,6 +202,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -224,6 +228,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + labels = constrainLabels(m.desc, labels) h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -266,16 +271,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label) + return 0, fmt.Errorf("label name %q is already curried", label.Name) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) + return 0, fmt.Errorf("label name %q missing in label map", label.Name) } h = m.hashAdd(h, val) } @@ -453,7 +458,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
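The vec.go changes above route With, WithLabelValues, and Delete through the same constraint, so differently-cased inputs address a single series. A sketch using the new GaugeVecOpts form (names invented):

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	vec := prometheus.V2.NewGaugeVec(prometheus.GaugeVecOpts{
		GaugeOpts: prometheus.GaugeOpts{
			Name: "example_temperature_celsius", // illustrative name
			Help: "Example gauge with a constrained label.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "sensor", Constraint: strings.ToLower},
		},
	})
	prometheus.MustRegister(vec)

	vec.WithLabelValues("Outdoor").Set(21.5)                // stored as sensor="outdoor"
	vec.With(prometheus.Labels{"sensor": "OUTDOOR"}).Inc()  // same series: With is constrained too
	_ = vec.Delete(prometheus.Labels{"sensor": "outdoor"})  // and so is Delete
}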
@@ -605,7 +610,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k] { + if values[i] != labels[k.Name] { return false } } @@ -621,7 +626,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) [] iCurry++ continue } - labelValues[i] = labels[k] + labelValues[i] = labels[k.Name] } return labelValues } @@ -640,3 +645,34 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +func constrainLabels(desc *Desc, labels Labels) Labels { + constrainedValues := make(Labels, len(labels)) + for l, v := range labels { + if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { + constrainedValues[l] = desc.variableLabels[i].Constrain(v) + continue + } + constrainedValues[l] = v + } + return constrainedValues +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + constrainedValues := make([]string, len(lvs)) + var iCurry, iLVs int + for i := 0; i < len(lvs)+len(curry); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + iCurry++ + continue + } + + if i < len(desc.variableLabels) { + constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + } else { + constrainedValues[iLVs] = lvs[iLVs] + } + iLVs++ + } + return constrainedValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go new file mode 100644 index 000000000..42bc3a8f0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type v2 struct{} + +// V2 is a struct that can be referenced to access experimental API that might +// be present in v2 of client golang someday. It offers extended functionality +// of v1 with slightly changed API. It is acceptable to use some pieces from v1 +// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` +// in the same codebase. +var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 1498ee144..25da157f1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,12 +17,10 @@ import ( "fmt" "sort" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "google.golang.org/protobuf/proto" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels[ln] = lv } // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea19..2b5bca4b9 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. 
+var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. +func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
+func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. 
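// The Histogram fields above carry the native-histogram encoding: with
// schema n each bucket boundary is the previous boundary times 2^(2^-n), and
// PositiveDelta/NegativeDelta store bucket counts as deltas against the
// previous bucket (the first against zero), laid out by the BucketSpan
// offset/length pairs. A small self-contained sketch of that arithmetic,
// using example numbers rather than anything taken from the diff:
package main

import (
	"fmt"
	"math"
)

func main() {
	// Growth factor between consecutive bucket boundaries for schema 3:
	// 2^(2^-3) = 2^(1/8) ≈ 1.0905.
	schema := 3
	factor := math.Pow(2, math.Pow(2, -float64(schema)))
	fmt.Printf("schema %d: next boundary = previous boundary * %.4f\n", schema, factor)

	// Decode delta-encoded bucket counts into absolute counts,
	// e.g. PositiveDelta [2, -1, 3] -> absolute counts [2, 1, 4].
	deltas := []int64{2, -1, 3}
	counts := make([]int64, len(deltas))
	var running int64
	for i, d := range deltas {
		running += d
		counts[i] = running
	}
	fmt.Println("absolute bucket counts:", counts)
}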
type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
+func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
+func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_DiscardUnknown() { - 
xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) -} - -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 
0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 
0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 
0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 
0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> 
io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa8..5727452c1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. 
- var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 0ffb31560..a411d542c 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -1,7 +1,6 @@ package jsonpatch import ( - "bytes" "encoding/json" "fmt" "reflect" @@ -24,21 +23,28 @@ func (j *Operation) Json() string { } func (j *Operation) MarshalJSON() ([]byte, error) { - var b bytes.Buffer - b.WriteString("{") - b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) - b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) - // Consider omitting Value for non-nullable operations. - if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { - v, err := json.Marshal(j.Value) - if err != nil { - return nil, err - } - b.WriteString(`,"value":`) - b.Write(v) - } - b.WriteString("}") - return b.Bytes(), nil + // Ensure for add and replace we emit `value: null` + if j.Value == nil && (j.Operation == "replace" || j.Operation == "add") { + return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` + }{ + Operation: j.Operation, + Path: j.Path, + }) + } + // otherwise just marshal normally. We cannot literally do json.Marshal(j) as it would be recursively + // calling this function. 
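For reference, a minimal sketch of how the rewritten ParseDuration above is expected to behave, assuming the vendored import path github.com/prometheus/common/model; the inputs and the commented outputs are illustrative only:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Units must appear from largest to smallest, each at most once.
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	// "h" after "m" violates the ordering rule enforced via lastUnitPos above.
	_, err = model.ParseDuration("1m1h")
	fmt.Println(err) // not a valid duration string: "1m1h"

	// Units outside unitMap are rejected explicitly.
	_, err = model.ParseDuration("5parsecs")
	fmt.Println(err) // unknown unit "parsecs" in duration "5parsecs"
}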
+ return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` + }{ + Operation: j.Operation, + Path: j.Path, + Value: j.Value, + }) } type ByPath []Operation @@ -149,9 +155,6 @@ func makePath(path string, newPart interface{}) string { if path == "" { return "/" + key } - if strings.HasSuffix(path, "/") { - return path + key - } return path + "/" + key } @@ -211,22 +214,18 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, } case []interface{}: bt := bv.([]interface{}) - if isSimpleArray(at) && isSimpleArray(bt) { - patch = append(patch, compareEditDistance(at, bt, p)...) - } else { - n := min(len(at), len(bt)) - for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewOperation("remove", makePath(p, i), nil)) - } - for i := n; i < len(bt); i++ { - patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) - } - for i := 0; i < n; i++ { - var err error - patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) - if err != nil { - return nil, err - } + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err } } default: @@ -235,100 +234,9 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, return patch, nil } -func isBasicType(a interface{}) bool { - switch a.(type) { - case string, float64, bool: - default: - return false - } - return true -} - -func isSimpleArray(a []interface{}) bool { - for i := range a { - switch a[i].(type) { - case string, float64, bool: - default: - val := reflect.ValueOf(a[i]) - if val.Kind() == reflect.Map { - for _, k := range val.MapKeys() { - av := val.MapIndex(k) - if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { - if av.IsNil() { - continue - } - av = av.Elem() - } - if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { - return false - } - } - return true - } - return false - } - } - return true -} - -// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm -// Adapted from https://github.com/texttheater/golang-levenshtein -func compareEditDistance(s, t []interface{}, p string) []Operation { - m := len(s) - n := len(t) - - d := make([][]int, m+1) - for i := 0; i <= m; i++ { - d[i] = make([]int, n+1) - d[i][0] = i - } - for j := 0; j <= n; j++ { - d[0][j] = j - } - - for j := 1; j <= n; j++ { - for i := 1; i <= m; i++ { - if reflect.DeepEqual(s[i-1], t[j-1]) { - d[i][j] = d[i-1][j-1] // no op required - } else { - del := d[i-1][j] + 1 - add := d[i][j-1] + 1 - rep := d[i-1][j-1] + 1 - d[i][j] = min(rep, min(add, del)) - } - } - } - - return backtrace(s, t, p, m, n, d) -} - func min(x int, y int) int { if y < x { return y } return x } - -func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { - if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { - op := NewOperation("remove", makePath(p, i-1), nil) - return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) - } - if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { - op := NewOperation("add", makePath(p, i), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) 
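A small sketch of the serialization behaviour introduced by the MarshalJSON rewrite above, built only from the exported Operation fields visible in this hunk and the vendored import path gomodules.xyz/jsonpatch/v2; the /spec/foo path is a placeholder and the commented outputs show the intended result rather than captured output. (The handleValues change above likewise makes array diffs purely index-based now that the edit-distance path is removed.)

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2"
)

func main() {
	// add and replace with a nil Value still emit an explicit "value":null.
	add := jsonpatch.Operation{Operation: "add", Path: "/spec/foo"}
	b, _ := json.Marshal(&add)
	fmt.Println(string(b)) // {"op":"add","path":"/spec/foo","value":null}

	// Other operations drop "value" entirely via omitempty.
	rm := jsonpatch.Operation{Operation: "remove", Path: "/spec/foo"}
	b, _ = json.Marshal(&rm)
	fmt.Println(string(b)) // {"op":"remove","path":"/spec/foo"}
}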
- } - if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { - if isBasicType(s[0]) { - op := NewOperation("replace", makePath(p, i-1), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) - } - - p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) - return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) - } - if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { - return backtrace(s, t, p, i-1, j-1, matrix) - } - return []Operation{} -} diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go index f81594c91..1395a7e10 100644 --- a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 13067ad80..82598ed57 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 6ac9e80ff..9a2d0bccd 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -44,10 +44,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_aaac5994f79683e8, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +103,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_aaac5994f79683e8, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +131,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + return fileDescriptor_aaac5994f79683e8, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +159,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_aaac5994f79683e8, []int{4} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +187,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_aaac5994f79683e8, []int{5} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +215,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_aaac5994f79683e8, []int{6} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +243,7 @@ var 
xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_aaac5994f79683e8, []int{7} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +271,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_aaac5994f79683e8, []int{8} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +299,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_aaac5994f79683e8, []int{9} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +327,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} + return fileDescriptor_aaac5994f79683e8, []int{10} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,6 +353,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") @@ -342,79 +371,116 @@ func init() { } var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1105 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0xae, 0x84, - 0x8c, 0x80, 0xdd, 0x26, 0x94, 0x52, 0x71, 0x41, 0xd9, 0xf0, 0x47, 0x11, 0x49, 0x1b, 0x4d, 0xda, - 0x14, 0xa1, 0x1c, 0x3a, 0x5e, 0x8f, 0xed, 0x21, 0xf6, 0xce, 0x6a, 0x66, 0xd6, 0x90, 0x1b, 0x1f, - 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, - 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xe6, 0xf9, - 0xbd, 0x79, 0xbf, 0x37, 0xef, 0xed, 0x7b, 0xef, 0x27, 0x83, 0x9d, 0xa3, 0xc7, 0xc2, 0xa2, 0xcc, - 0x3e, 0x0a, 0x9a, 0x84, 0x7b, 0x44, 0x12, 0x61, 0x0f, 0x88, 0xd7, 0x62, 0xdc, 0x4e, 0x0c, 0xd8, - 0xa7, 0x36, 0x6e, 0xf5, 0xa9, 0x10, 0x94, 0x79, 0x9c, 0x74, 0xa8, 0x90, 0x1c, 0x4b, 0xca, 0x3c, - 0x7b, 0xb0, 0x6e, 
0x77, 0x88, 0x47, 0x38, 0x96, 0xa4, 0x65, 0xf9, 0x9c, 0x49, 0x06, 0xef, 0xc7, - 0x4e, 0x16, 0xf6, 0xa9, 0x75, 0xa1, 0x93, 0x35, 0x58, 0x5f, 0xfb, 0xb0, 0x43, 0x65, 0x37, 0x68, - 0x5a, 0x2e, 0xeb, 0xdb, 0x1d, 0xd6, 0x61, 0xb6, 0xf6, 0x6d, 0x06, 0x6d, 0x7d, 0xd2, 0x07, 0xfd, - 0x2b, 0xe6, 0x5c, 0x7b, 0x98, 0x3d, 0xa4, 0x8f, 0xdd, 0x2e, 0xf5, 0x08, 0x3f, 0xb6, 0xfd, 0xa3, - 0x8e, 0x02, 0x84, 0xdd, 0x27, 0x12, 0x5f, 0xf0, 0x92, 0x35, 0xfb, 0x32, 0x2f, 0x1e, 0x78, 0x92, - 0xf6, 0xc9, 0x94, 0xc3, 0xa3, 0xff, 0x73, 0x10, 0x6e, 0x97, 0xf4, 0xf1, 0xa4, 0x5f, 0xfd, 0xb7, - 0x05, 0xb0, 0xb2, 0x1b, 0x48, 0x2c, 0xa9, 0xd7, 0x79, 0x41, 0x9a, 0x5d, 0xc6, 0x8e, 0x60, 0x0d, - 0xe4, 0x3d, 0xdc, 0x27, 0x15, 0xa3, 0x66, 0x34, 0x4a, 0xce, 0xe2, 0x49, 0x68, 0xce, 0x45, 0xa1, - 0x99, 0x7f, 0x82, 0xfb, 0x04, 0x69, 0x0b, 0xe4, 0x60, 0xd1, 0xed, 0x51, 0xe2, 0xc9, 0x2d, 0xe6, - 0xb5, 0x69, 0xa7, 0x32, 0x5f, 0x33, 0x1a, 0xe5, 0x8d, 0xc7, 0xd6, 0x0c, 0xf5, 0xb3, 0x92, 0x28, - 0x5b, 0x23, 0xfe, 0xce, 0x9b, 0x49, 0x8c, 0xc5, 0x51, 0x14, 0x8d, 0xc5, 0x80, 0x87, 0xa0, 0xc0, - 0x83, 0x1e, 0x11, 0x95, 0x5c, 0x2d, 0xd7, 0x28, 0x6f, 0x7c, 0x32, 0x53, 0x30, 0x14, 0xf4, 0xc8, - 0x0b, 0x2a, 0xbb, 0x4f, 0x7d, 0x12, 0x83, 0xc2, 0x59, 0x4a, 0x62, 0x15, 0x94, 0x4d, 0xa0, 0x98, - 0x14, 0xee, 0x80, 0xa5, 0x36, 0xa6, 0xbd, 0x80, 0x93, 0x3d, 0xd6, 0xa3, 0xee, 0x71, 0x25, 0xaf, - 0x93, 0x7f, 0x37, 0x0a, 0xcd, 0xa5, 0x2f, 0x47, 0x0d, 0xe7, 0xa1, 0xb9, 0x3a, 0x06, 0x3c, 0x3b, - 0xf6, 0x09, 0x1a, 0x77, 0x86, 0x9f, 0x83, 0x72, 0x1f, 0x4b, 0xb7, 0x9b, 0x70, 0x95, 0x34, 0x57, - 0x3d, 0x0a, 0xcd, 0xf2, 0x6e, 0x06, 0x9f, 0x87, 0xe6, 0xca, 0xc8, 0x51, 0xf3, 0x8c, 0xba, 0xc1, - 0x1f, 0xc0, 0xaa, 0xaa, 0xb6, 0xf0, 0xb1, 0x4b, 0xf6, 0x49, 0x8f, 0xb8, 0x92, 0xf1, 0x4a, 0x41, - 0x97, 0xfa, 0xa3, 0x91, 0xec, 0xd3, 0xef, 0x6d, 0xf9, 0x47, 0x1d, 0x05, 0x08, 0x4b, 0xb5, 0x95, - 0x4a, 0x7f, 0x07, 0x37, 0x49, 0x6f, 0xe8, 0xea, 0xbc, 0x15, 0x85, 0xe6, 0xea, 0x93, 0x49, 0x46, - 0x34, 0x1d, 0x04, 0x32, 0xb0, 0xcc, 0x9a, 0xdf, 0x11, 0x57, 0xa6, 0x61, 0xcb, 0xd7, 0x0f, 0x0b, - 0xa3, 0xd0, 0x5c, 0x7e, 0x3a, 0x46, 0x87, 0x26, 0xe8, 0x55, 0xc1, 0x04, 0x6d, 0x91, 0x2f, 0xda, - 0x6d, 0xe2, 0x4a, 0x51, 0xb9, 0x95, 0x15, 0x6c, 0x3f, 0x83, 0x55, 0xc1, 0xb2, 0xe3, 0x56, 0x0f, - 0x0b, 0x81, 0x46, 0xdd, 0xe0, 0xa7, 0x60, 0x59, 0xf5, 0x3a, 0x0b, 0xe4, 0x3e, 0x71, 0x99, 0xd7, - 0x12, 0x95, 0x85, 0x9a, 0xd1, 0x28, 0xc4, 0x2f, 0x78, 0x36, 0x66, 0x41, 0x13, 0x37, 0xe1, 0x73, - 0x70, 0x37, 0xed, 0x22, 0x44, 0x06, 0x94, 0x7c, 0x7f, 0x40, 0xb8, 0x3a, 0x88, 0x4a, 0xb1, 0x96, - 0x6b, 0x94, 0x9c, 0x77, 0xa2, 0xd0, 0xbc, 0xbb, 0x79, 0xf1, 0x15, 0x74, 0x99, 0x2f, 0x7c, 0x09, - 0x20, 0x27, 0xd4, 0x1b, 0x30, 0x57, 0xb7, 0x5f, 0xd2, 0x10, 0x40, 0xe7, 0xf7, 0x20, 0x0a, 0x4d, - 0x88, 0xa6, 0xac, 0xe7, 0xa1, 0x79, 0x67, 0x1a, 0xd5, 0xed, 0x71, 0x01, 0x57, 0xfd, 0xd4, 0x00, - 0xf7, 0x26, 0x26, 0x38, 0x9e, 0x98, 0x20, 0xee, 0x78, 0xf8, 0x12, 0x14, 0xd5, 0x87, 0x69, 0x61, - 0x89, 0xf5, 0x48, 0x97, 0x37, 0x1e, 0xcc, 0xf6, 0x19, 0xe3, 0x6f, 0xb6, 0x4b, 0x24, 0x76, 0x60, - 0x32, 0x34, 0x20, 0xc3, 0x50, 0xca, 0x0a, 0x0f, 0x40, 0x31, 0x89, 0x2c, 0x2a, 0xf3, 0x7a, 0x3a, - 0x1f, 0xce, 0x34, 0x9d, 0x13, 0xcf, 0x76, 0xf2, 0x2a, 0x0a, 0x4a, 0xb9, 0xea, 0xff, 0x18, 0xa0, - 0x76, 0x55, 0x6a, 0x3b, 0x54, 0x48, 0x78, 0x38, 0x95, 0x9e, 0x35, 0x63, 0x97, 0x52, 0x11, 0x27, - 0x77, 0x3b, 0x49, 0xae, 0x38, 0x44, 0x46, 0x52, 0x6b, 0x83, 0x02, 0x95, 0xa4, 0x3f, 0xcc, 0x6b, - 0xf3, 0x3a, 0x79, 0x8d, 0xbd, 0x39, 0xdb, 0x3f, 0xdb, 0x8a, 0x17, 0xc5, 0xf4, 0xf5, 0xdf, 0x0d, - 0x90, 0x57, 0x0b, 0x09, 0xbe, 0x0f, 0x4a, 
0xd8, 0xa7, 0x5f, 0x71, 0x16, 0xf8, 0xa2, 0x62, 0xe8, - 0xce, 0x5b, 0x8a, 0x42, 0xb3, 0xb4, 0xb9, 0xb7, 0x1d, 0x83, 0x28, 0xb3, 0xc3, 0x75, 0x50, 0xc6, - 0x3e, 0x4d, 0x1b, 0x75, 0x5e, 0x5f, 0x5f, 0x51, 0x63, 0xb3, 0xb9, 0xb7, 0x9d, 0x36, 0xe7, 0xe8, - 0x1d, 0xc5, 0xcf, 0x89, 0x60, 0x01, 0x77, 0x93, 0x55, 0x9a, 0xf0, 0xa3, 0x21, 0x88, 0x32, 0x3b, - 0xfc, 0x00, 0x14, 0x84, 0xcb, 0x7c, 0x92, 0x6c, 0xc3, 0x3b, 0xea, 0xd9, 0xfb, 0x0a, 0x38, 0x0f, - 0xcd, 0x92, 0xfe, 0xa1, 0xdb, 0x32, 0xbe, 0x54, 0xff, 0xc5, 0x00, 0x70, 0x7a, 0xe1, 0xc2, 0xcf, - 0x00, 0x60, 0xe9, 0x29, 0x49, 0xc9, 0xd4, 0xbd, 0x94, 0xa2, 0xe7, 0xa1, 0xb9, 0x94, 0x9e, 0x34, - 0xe5, 0x88, 0x0b, 0xfc, 0x1a, 0xe4, 0xd5, 0x92, 0x4e, 0x54, 0xe6, 0xbd, 0x99, 0x17, 0x7f, 0x26, - 0x5d, 0xea, 0x84, 0x34, 0x49, 0xfd, 0x67, 0x03, 0xdc, 0xde, 0x27, 0x7c, 0x40, 0x5d, 0x82, 0x48, - 0x9b, 0x70, 0xe2, 0xb9, 0x04, 0xda, 0xa0, 0x94, 0x2e, 0xc1, 0x44, 0xf6, 0x56, 0x13, 0xdf, 0x52, - 0xba, 0x30, 0x51, 0x76, 0x27, 0x95, 0xc8, 0xf9, 0x4b, 0x25, 0xf2, 0x1e, 0xc8, 0xfb, 0x58, 0x76, - 0x2b, 0x39, 0x7d, 0xa3, 0xa8, 0xac, 0x7b, 0x58, 0x76, 0x91, 0x46, 0xb5, 0x95, 0x71, 0xa9, 0xeb, - 0x5a, 0x48, 0xac, 0x8c, 0x4b, 0xa4, 0xd1, 0xfa, 0x9f, 0xb7, 0xc0, 0xea, 0x01, 0xee, 0xd1, 0xd6, - 0x8d, 0x2c, 0xdf, 0xc8, 0xf2, 0x95, 0xb2, 0x0c, 0x6e, 0x64, 0xf9, 0x3a, 0xb2, 0x5c, 0xff, 0xc3, - 0x00, 0xd5, 0xa9, 0x09, 0x7b, 0xdd, 0xb2, 0xf9, 0xcd, 0x94, 0x6c, 0x3e, 0x9a, 0x69, 0x7a, 0xa6, - 0x1e, 0x3e, 0x25, 0x9c, 0xff, 0x1a, 0xa0, 0x7e, 0x75, 0x7a, 0xaf, 0x41, 0x3a, 0xbb, 0xe3, 0xd2, - 0xb9, 0x75, 0xbd, 0xdc, 0x66, 0x11, 0xcf, 0x5f, 0x0d, 0xf0, 0xc6, 0x05, 0xfb, 0x0b, 0xbe, 0x0d, - 0x72, 0x01, 0xef, 0x25, 0x2b, 0x78, 0x21, 0x0a, 0xcd, 0xdc, 0x73, 0xb4, 0x83, 0x14, 0x06, 0x0f, - 0xc1, 0x82, 0x88, 0x55, 0x20, 0xc9, 0xfc, 0xe3, 0x99, 0x9e, 0x37, 0xa9, 0x1c, 0x4e, 0x39, 0x0a, - 0xcd, 0x85, 0x21, 0x3a, 0xa4, 0x84, 0x0d, 0x50, 0x74, 0xb1, 0x13, 0x78, 0xad, 0x44, 0xb5, 0x16, - 0x9d, 0x45, 0x55, 0xa4, 0xad, 0xcd, 0x18, 0x43, 0xa9, 0xd5, 0xd9, 0x3e, 0x39, 0xab, 0xce, 0xbd, - 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, 0x45, - 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xde, 0x9f, - 0xe1, 0xdf, 0xec, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0x44, 0x86, 0xf5, 0x0c, 0x0f, 0x00, + // 1169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0x36, 0xb1, 0xc7, 0x4e, 0xd2, 0x0c, 0xd0, 0x2e, 0xa5, 0xf2, 0x5a, 0xae, 0x84, + 0x82, 0x00, 0x6f, 0x9b, 0x96, 0x52, 0x71, 0x41, 0xb1, 0x29, 0x28, 0x22, 0x69, 0xa3, 0x49, 0x3f, + 0x10, 0xea, 0xa1, 0xe3, 0xf5, 0xd8, 0x1e, 0x62, 0xef, 0xac, 0x66, 0x66, 0x4d, 0x7b, 0xe3, 0x27, + 0xf0, 0x17, 0xe0, 0x4f, 0xc0, 0x95, 0x5b, 0x8f, 0xbd, 0x91, 0x03, 0x5a, 0x91, 0xe5, 0xc2, 0x81, + 0x5f, 0x90, 0x13, 0x9a, 0xd9, 0xf5, 0xae, 0xbf, 0x12, 0x56, 0x39, 0xe4, 0x94, 0x5b, 0xe6, 0x79, + 0xdf, 0xf7, 0x79, 0xe7, 0x19, 0xbf, 0x1f, 0xab, 0x80, 0xdd, 0xc3, 0xfb, 0xa2, 0x41, 0x99, 0x7d, + 0xe8, 0xb7, 0x09, 0x77, 0x89, 0x24, 0xc2, 0x1e, 0x11, 0xb7, 0xc3, 0xb8, 0x1d, 0x1b, 0xb0, 0x47, + 0x6d, 0xdc, 0x19, 0x52, 0x21, 0x28, 0x73, 0x39, 0xe9, 0x51, 0x21, 0x39, 0x96, 0x94, 0xb9, 0xf6, + 0xe8, 0xb6, 0xdd, 0x23, 0x2e, 0xe1, 0x58, 0x92, 0x4e, 0xc3, 0xe3, 0x4c, 0x32, 0x78, 0x33, 0x0a, + 0x6a, 0x60, 0x8f, 0x36, 0x16, 0x06, 0x35, 0x46, 0xb7, 0xaf, 0x7f, 0xd2, 0xa3, 0xb2, 0xef, 0xb7, + 0x1b, 0x0e, 0x1b, 0xda, 0x3d, 0xd6, 0x63, 0xb6, 0x8e, 0x6d, 0xfb, 0x5d, 0x7d, 0xd2, 0x07, 0xfd, + 0x57, 0xc4, 0x79, 
0xfd, 0x6e, 0x7a, 0x91, 0x21, 0x76, 0xfa, 0xd4, 0x25, 0xfc, 0x95, 0xed, 0x1d, + 0xf6, 0x14, 0x20, 0xec, 0x21, 0x91, 0x78, 0xc1, 0x4d, 0xae, 0xdb, 0xa7, 0x45, 0x71, 0xdf, 0x95, + 0x74, 0x48, 0xe6, 0x02, 0xee, 0xfd, 0x5f, 0x80, 0x70, 0xfa, 0x64, 0x88, 0x67, 0xe3, 0xea, 0x5d, + 0xb0, 0xb6, 0x87, 0xa5, 0xd3, 0x6f, 0x31, 0xb7, 0x43, 0x95, 0x44, 0x58, 0x03, 0x79, 0x17, 0x0f, + 0x89, 0x69, 0xd4, 0x8c, 0xcd, 0x52, 0xb3, 0xf2, 0x3a, 0xb0, 0x96, 0xc2, 0xc0, 0xca, 0x3f, 0xc4, + 0x43, 0x82, 0xb4, 0x05, 0x6e, 0x01, 0x40, 0x5e, 0x7a, 0x9c, 0xe8, 0xe7, 0x31, 0x97, 0xb5, 0x1f, + 0x8c, 0xfd, 0xc0, 0x83, 0xc4, 0x82, 0x26, 0xbc, 0xea, 0xbf, 0x16, 0xc1, 0xfa, 0x9e, 0x2f, 0xb1, + 0xa4, 0x6e, 0xef, 0x19, 0x69, 0xf7, 0x19, 0x3b, 0xcc, 0x90, 0x89, 0x83, 0x8a, 0x33, 0xa0, 0xc4, + 0x95, 0x2d, 0xe6, 0x76, 0x69, 0x4f, 0xe7, 0x2a, 0x6f, 0xdd, 0x6f, 0x64, 0xf8, 0x9d, 0x1a, 0x71, + 0x96, 0xd6, 0x44, 0x7c, 0xf3, 0x9d, 0x38, 0x47, 0x65, 0x12, 0x45, 0x53, 0x39, 0xe0, 0x73, 0x50, + 0xe0, 0xfe, 0x80, 0x08, 0x33, 0x57, 0xcb, 0x6d, 0x96, 0xb7, 0x3e, 0xcb, 0x94, 0x0c, 0xf9, 0x03, + 0xf2, 0x8c, 0xca, 0xfe, 0x23, 0x8f, 0x44, 0xa0, 0x68, 0xae, 0xc6, 0xb9, 0x0a, 0xca, 0x26, 0x50, + 0x44, 0x0a, 0x77, 0xc1, 0x6a, 0x17, 0xd3, 0x81, 0xcf, 0xc9, 0x3e, 0x1b, 0x50, 0xe7, 0x95, 0x99, + 0xd7, 0xe2, 0x3f, 0x08, 0x03, 0x6b, 0xf5, 0xab, 0x49, 0xc3, 0x49, 0x60, 0x6d, 0x4c, 0x01, 0x8f, + 0x5f, 0x79, 0x04, 0x4d, 0x07, 0xc3, 0x2f, 0x41, 0x79, 0xa8, 0x7e, 0xbd, 0x98, 0xab, 0xa4, 0xb9, + 0xea, 0x61, 0x60, 0x95, 0xf7, 0x52, 0xf8, 0x24, 0xb0, 0xd6, 0x27, 0x8e, 0x9a, 0x67, 0x32, 0x0c, + 0xbe, 0x04, 0x1b, 0xea, 0xb5, 0x85, 0x87, 0x1d, 0x72, 0x40, 0x06, 0xc4, 0x91, 0x8c, 0x9b, 0x05, + 0xfd, 0xd4, 0x77, 0x26, 0xd4, 0x27, 0x75, 0xd5, 0xf0, 0x0e, 0x7b, 0x0a, 0x10, 0x0d, 0x55, 0xbe, + 0x4a, 0xfe, 0x2e, 0x6e, 0x93, 0xc1, 0x38, 0xb4, 0xf9, 0x6e, 0x18, 0x58, 0x1b, 0x0f, 0x67, 0x19, + 0xd1, 0x7c, 0x12, 0xc8, 0xc0, 0x1a, 0x6b, 0x7f, 0x4f, 0x1c, 0x99, 0xa4, 0x2d, 0x9f, 0x3f, 0x2d, + 0x0c, 0x03, 0x6b, 0xed, 0xd1, 0x14, 0x1d, 0x9a, 0xa1, 0x57, 0x0f, 0x26, 0x68, 0x87, 0x3c, 0xe8, + 0x76, 0x89, 0x23, 0x85, 0xf9, 0x56, 0xfa, 0x60, 0x07, 0x29, 0xac, 0x1e, 0x2c, 0x3d, 0xb6, 0x06, + 0x58, 0x08, 0x34, 0x19, 0x06, 0x3f, 0x07, 0x6b, 0xaa, 0xa7, 0x98, 0x2f, 0x0f, 0x88, 0xc3, 0xdc, + 0x8e, 0x30, 0x57, 0x6a, 0xc6, 0x66, 0x21, 0xba, 0xc1, 0xe3, 0x29, 0x0b, 0x9a, 0xf1, 0x84, 0x4f, + 0xc0, 0xb5, 0xa4, 0x8a, 0x10, 0x19, 0x51, 0xf2, 0xc3, 0x53, 0xc2, 0xd5, 0x41, 0x98, 0xc5, 0x5a, + 0x6e, 0xb3, 0xd4, 0x7c, 0x3f, 0x0c, 0xac, 0x6b, 0xdb, 0x8b, 0x5d, 0xd0, 0x69, 0xb1, 0xf0, 0x05, + 0x80, 0x9c, 0x50, 0x77, 0xc4, 0x1c, 0x5d, 0x7e, 0x71, 0x41, 0x00, 0xad, 0xef, 0x56, 0x18, 0x58, + 0x10, 0xcd, 0x59, 0x4f, 0x02, 0xeb, 0xea, 0x3c, 0xaa, 0xcb, 0x63, 0x01, 0x17, 0x1c, 0x81, 0xf5, + 0xe1, 0xd4, 0xa4, 0x10, 0x66, 0x45, 0x77, 0xc8, 0x9d, 0x4c, 0x1d, 0x32, 0x3d, 0x65, 0x9a, 0xd7, + 0xe2, 0xee, 0x58, 0x9f, 0xc6, 0x05, 0x9a, 0x4d, 0x52, 0x3f, 0x32, 0xc0, 0x8d, 0x99, 0xc9, 0x11, + 0x75, 0xaa, 0x1f, 0x91, 0xc3, 0x17, 0xa0, 0xa8, 0x0a, 0xa2, 0x83, 0x25, 0xd6, 0xa3, 0xa4, 0xbc, + 0x75, 0x2b, 0x5b, 0xf9, 0x44, 0xb5, 0xb2, 0x47, 0x24, 0x4e, 0xc7, 0x57, 0x8a, 0xa1, 0x84, 0x15, + 0x3e, 0x05, 0xc5, 0x38, 0xb3, 0x30, 0x97, 0xb5, 0xe6, 0xbb, 0xd9, 0x34, 0x4f, 0x5f, 0xbb, 0x99, + 0x57, 0x59, 0x50, 0xc2, 0x55, 0xff, 0xc7, 0x00, 0xb5, 0xb3, 0xa4, 0xed, 0x52, 0x21, 0xe1, 0xf3, + 0x39, 0x79, 0x8d, 0x8c, 0xdd, 0x41, 0x45, 0x24, 0xee, 0x4a, 0x2c, 0xae, 0x38, 0x46, 0x26, 0xa4, + 0x75, 0x41, 0x81, 0x4a, 0x32, 0x1c, 0xeb, 0xda, 0x3e, 0x8f, 0xae, 0xa9, 0x3b, 0xa7, 0x73, 0x6f, + 0x47, 0xf1, 0xa2, 0x88, 0xbe, 0xfe, 0xbb, 
0x01, 0xf2, 0x6a, 0x10, 0xc2, 0x8f, 0x40, 0x09, 0x7b, + 0xf4, 0x6b, 0xce, 0x7c, 0x4f, 0x98, 0x86, 0xae, 0xf8, 0xd5, 0x30, 0xb0, 0x4a, 0xdb, 0xfb, 0x3b, + 0x11, 0x88, 0x52, 0x3b, 0xbc, 0x0d, 0xca, 0xd8, 0xa3, 0x49, 0x83, 0x2c, 0x6b, 0xf7, 0x75, 0xd5, + 0xae, 0xdb, 0xfb, 0x3b, 0x49, 0x53, 0x4c, 0xfa, 0x28, 0x7e, 0x4e, 0x04, 0xf3, 0xb9, 0x13, 0x8f, + 0xf0, 0x98, 0x1f, 0x8d, 0x41, 0x94, 0xda, 0xe1, 0xc7, 0xa0, 0x20, 0x1c, 0xe6, 0x91, 0x78, 0x0a, + 0x5f, 0x55, 0xd7, 0x3e, 0x50, 0xc0, 0x49, 0x60, 0x95, 0xf4, 0x1f, 0xba, 0x1d, 0x22, 0xa7, 0xfa, + 0x2f, 0x06, 0x80, 0xf3, 0x83, 0x1e, 0x7e, 0x01, 0x00, 0x4b, 0x4e, 0xb1, 0x24, 0x4b, 0xd7, 0x52, + 0x82, 0x9e, 0x04, 0xd6, 0x6a, 0x72, 0xd2, 0x94, 0x13, 0x21, 0xf0, 0x1b, 0x90, 0x57, 0xcb, 0x21, + 0xde, 0x6e, 0x1f, 0x66, 0x5e, 0x38, 0xe9, 0xca, 0x54, 0x27, 0xa4, 0x49, 0xea, 0x3f, 0x1b, 0xe0, + 0xca, 0x01, 0xe1, 0x23, 0xea, 0x10, 0x44, 0xba, 0x84, 0x13, 0xd7, 0x21, 0xd0, 0x06, 0xa5, 0x64, + 0xf8, 0xc6, 0xeb, 0x76, 0x23, 0x8e, 0x2d, 0x25, 0x83, 0x1a, 0xa5, 0x3e, 0xc9, 0x6a, 0x5e, 0x3e, + 0x75, 0x35, 0xdf, 0x00, 0x79, 0x0f, 0xcb, 0xbe, 0x99, 0xd3, 0x1e, 0x45, 0x65, 0xdd, 0xc7, 0xb2, + 0x8f, 0x34, 0xaa, 0xad, 0x8c, 0x4b, 0xfd, 0xae, 0x85, 0xd8, 0xca, 0xb8, 0x44, 0x1a, 0xad, 0xff, + 0xb1, 0x02, 0x36, 0x9e, 0xe2, 0x01, 0xed, 0x5c, 0x7e, 0x0e, 0x5c, 0x7e, 0x0e, 0x9c, 0xf9, 0x39, + 0x00, 0x2e, 0x3f, 0x07, 0xce, 0xf5, 0x39, 0xb0, 0x60, 0x59, 0x97, 0x2f, 0x62, 0x59, 0xff, 0x69, + 0x80, 0xea, 0x5c, 0x67, 0x5f, 0xf4, 0xba, 0xfe, 0x76, 0x6e, 0x5d, 0xdf, 0xcb, 0xa4, 0x7a, 0xee, + 0xe2, 0x73, 0x0b, 0xfb, 0x5f, 0x03, 0xd4, 0xcf, 0x96, 0x77, 0x01, 0x2b, 0xbb, 0x3f, 0xbd, 0xb2, + 0x5b, 0xe7, 0xd3, 0x96, 0x65, 0x69, 0xff, 0x66, 0x80, 0xb7, 0x17, 0xcc, 0x4d, 0xf8, 0x1e, 0xc8, + 0xf9, 0x7c, 0x10, 0x8f, 0xfe, 0x95, 0x30, 0xb0, 0x72, 0x4f, 0xd0, 0x2e, 0x52, 0x18, 0x7c, 0x0e, + 0x56, 0x44, 0xb4, 0x7d, 0x62, 0xe5, 0x9f, 0x66, 0xba, 0xde, 0xec, 0xc6, 0x6a, 0x96, 0xc3, 0xc0, + 0x5a, 0x19, 0xa3, 0x63, 0x4a, 0xb8, 0x09, 0x8a, 0x0e, 0x6e, 0xfa, 0x6e, 0x27, 0xde, 0x96, 0x95, + 0x66, 0x45, 0x3d, 0x52, 0x6b, 0x3b, 0xc2, 0x50, 0x62, 0x6d, 0xee, 0xbc, 0x3e, 0xae, 0x2e, 0xbd, + 0x39, 0xae, 0x2e, 0x1d, 0x1d, 0x57, 0x97, 0x7e, 0x0c, 0xab, 0xc6, 0xeb, 0xb0, 0x6a, 0xbc, 0x09, + 0xab, 0xc6, 0x51, 0x58, 0x35, 0xfe, 0x0a, 0xab, 0xc6, 0x4f, 0x7f, 0x57, 0x97, 0xbe, 0xbb, 0x99, + 0xe1, 0xbf, 0x04, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xe1, 0x3a, 0x73, 0x64, 0x10, 0x00, 0x00, } +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -435,6 +501,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -791,6 +871,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1036,6 +1130,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -1085,6 +1192,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1235,6 +1348,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1299,6 +1418,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1308,6 +1438,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1320,6 +1455,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1402,6 +1538,11 @@ func (this *ValidatingWebhook) 
String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1413,6 +1554,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1469,6 +1611,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1853,6 +2109,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2920,6 +3210,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index aa266a2a5..cdf1f4765 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -28,6 +28,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1"; +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. 
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -173,6 +202,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -409,6 +460,28 @@ message ValidatingWebhook { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
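The matching logic spelled out in these comments (identical for MutatingWebhook and ValidatingWebhook) reduces to a small decision procedure. The following is a minimal, illustrative Go sketch of that evaluation order; the conditionResult type and decide function are invented stand-ins for the apiserver's CEL evaluation and are not part of this vendored code.

// Illustrative only — not vendored code. A condensed model of the
// matchConditions decision order documented above; the results below are
// hypothetical stand-ins for CEL evaluation outcomes.
package main

import (
	"errors"
	"fmt"
)

type conditionResult struct {
	matched bool  // CEL result, meaningful only when err == nil
	err     error // evaluation error, if any
}

// decide returns (callWebhook, rejectRequest) for a given failurePolicy
// ("Fail" or "Ignore"), following rules 1-3 from the field documentation.
func decide(results []conditionResult, failurePolicy string) (bool, bool) {
	sawError := false
	for _, r := range results {
		switch {
		case r.err != nil:
			sawError = true // rule 3 applies only if nothing evaluates to FALSE
		case !r.matched:
			return false, false // rule 1: any FALSE skips the webhook
		}
	}
	if sawError {
		if failurePolicy == "Fail" {
			return false, true // rule 3 + Fail: reject the request
		}
		return false, false // rule 3 + Ignore: skip the webhook
	}
	return true, false // rule 2: all TRUE, call the webhook
}

func main() {
	results := []conditionResult{
		{matched: true},
		{err: errors.New("cel: no such field")},
	}
	call, reject := decide(results, "Fail")
	fmt.Println(call, reject) // false true
}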
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index e74b276f6..74f17d54a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -307,6 +307,28 @@ type ValidatingWebhook struct { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -454,6 +476,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
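For context on how the new field is consumed, here is a hedged sketch of constructing a ValidatingWebhookConfiguration that sets MatchConditions using these v1 types; the webhook name, service reference, and CEL expression are made-up examples and do not come from this diff.

// Illustrative only — not part of the vendored diff. All concrete names and
// the CEL expression are invented examples.
package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sideEffects := admissionregistrationv1.SideEffectClassNone
	cfg := admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-webhook-config"},
		Webhooks: []admissionregistrationv1.ValidatingWebhook{{
			Name:                    "example.webhook.example.com",
			SideEffects:             &sideEffects,
			AdmissionReviewVersions: []string{"v1"},
			ClientConfig: admissionregistrationv1.WebhookClientConfig{
				Service: &admissionregistrationv1.ServiceReference{
					Namespace: "default",
					Name:      "example-webhook",
				},
			},
			// New in this version: CEL-based pre-filtering of admission requests.
			MatchConditions: []admissionregistrationv1.MatchCondition{{
				Name:       "exclude-kubelet-requests",
				Expression: `!("system:nodes" in request.userInfo.groups)`,
			}},
		}},
	}
	fmt.Println(cfg.Webhooks[0].MatchConditions[0].Name)
}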
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -563,3 +607,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c3..ce306b307 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. 
Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. 
* to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -111,6 +122,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index cff7377a5..b95609913 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -77,6 +93,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -286,6 +307,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index a00f532d2..746535026 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -45,10 +45,94 @@ var _ = math.Inf // proto package needs to be updated. 
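The v1alpha1 hunks that follow wire in several new ValidatingAdmissionPolicy messages (AuditAnnotation, ExpressionWarning, MatchCondition, TypeChecking, and a policy Status) plus new fields such as Validation.MessageExpression and ValidatingAdmissionPolicyBindingSpec.ValidationActions. As a rough orientation, the sketch below populates a policy spec with the new fields using this package's Go types; the policy name and CEL expressions are invented examples, not values from this diff.

// Illustrative only — not part of the vendored diff. Policy name and
// expressions are invented for illustration.
package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	policy := admissionregistrationv1alpha1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-replica-policy"},
		Spec: admissionregistrationv1alpha1.ValidatingAdmissionPolicySpec{
			// New in this version: CEL pre-filtering at the policy level.
			MatchConditions: []admissionregistrationv1alpha1.MatchCondition{{
				Name:       "only-deployments",
				Expression: `request.kind.kind == "Deployment"`,
			}},
			Validations: []admissionregistrationv1alpha1.Validation{{
				Expression: "object.spec.replicas <= 5",
				// New in this version: a CEL-generated failure message.
				MessageExpression: `"replicas must be <= 5, got " + string(object.spec.replicas)`,
			}},
			// New in this version: audit annotations computed via CEL.
			AuditAnnotations: []admissionregistrationv1alpha1.AuditAnnotation{{
				Key:             "replica-limit",
				ValueExpression: "string(object.spec.replicas)",
			}},
		},
	}
	fmt.Println(policy.Spec.Validations[0].MessageExpression)
}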
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{0} + return fileDescriptor_c3be8d256e3ae3cf, []int{3} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +160,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{1} + return fileDescriptor_c3be8d256e3ae3cf, []int{4} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +188,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m 
*ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{2} + return fileDescriptor_c3be8d256e3ae3cf, []int{5} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +216,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{3} + return fileDescriptor_c3be8d256e3ae3cf, []int{6} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,10 +241,38 @@ func (m *ParamRef) XXX_DiscardUnknown() { var xxx_messageInfo_ParamRef proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{7} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{4} + return fileDescriptor_c3be8d256e3ae3cf, []int{8} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +300,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{5} + return fileDescriptor_c3be8d256e3ae3cf, []int{9} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +328,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{6} + return fileDescriptor_c3be8d256e3ae3cf, []int{10} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +356,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{7} + return fileDescriptor_c3be8d256e3ae3cf, []int{11} } func (m 
*ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +384,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{8} + return fileDescriptor_c3be8d256e3ae3cf, []int{12} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +412,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{9} + return fileDescriptor_c3be8d256e3ae3cf, []int{13} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,10 +437,38 @@ func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{14} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{10} + return fileDescriptor_c3be8d256e3ae3cf, []int{15} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,16 +494,21 @@ func (m *Validation) XXX_DiscardUnknown() { var xxx_messageInfo_Validation proto.InternalMessageInfo func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") + proto.RegisterType((*TypeChecking)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.TypeChecking") proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy") proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding") proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList") proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec") proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList") proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation") } @@ -372,73 +517,194 @@ func init() { } var fileDescriptor_c3be8d256e3ae3cf = []byte{ - // 1054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcd, 0x6e, 0x1c, 0x45, - 0x10, 0xf6, 0xc4, 0x9b, 0xc4, 0xdb, 0x1b, 0x3b, 0x76, 0xe3, 0x88, 0xc5, 0x82, 0xdd, 0xd5, 0x2a, - 0x42, 0xf6, 0x81, 0x19, 0xec, 0x04, 0x02, 0x27, 0x94, 0x21, 0x41, 0x44, 0xb1, 0x63, 0xab, 0x8d, - 0x12, 0x09, 0x11, 0x89, 0xf6, 0x4c, 0x7b, 0xb6, 0xb3, 0x3b, 0x3f, 0x4c, 0xf7, 0x58, 0xb6, 0x38, - 0x80, 0xc4, 0x0b, 0x70, 0xe0, 0x41, 0x38, 0x71, 0xe1, 0x05, 0x7c, 0xcc, 0xd1, 0x5c, 0x46, 0x78, - 0xb8, 0xc0, 0x0b, 0x80, 0xe4, 0x13, 0xea, 0x9e, 0x9e, 0xbf, 0xfd, 0xc1, 0xeb, 0x60, 0xe5, 0xb6, - 0x5d, 0x3f, 0xdf, 0x57, 0x55, 0x5d, 0x35, 0xd5, 0x0b, 0x50, 0xff, 0x23, 0xa6, 0x53, 0xdf, 0xe8, - 0x47, 0x7b, 0x24, 0xf4, 0x08, 0x27, 0xcc, 0x38, 0x20, 0x9e, 0xed, 0x87, 0x86, 0x52, 0xe0, 0x80, - 0x1a, 0xd8, 0x76, 0x29, 0x63, 0xd4, 0xf7, 0x42, 0xe2, 0x50, 0xc6, 0x43, 0xcc, 0xa9, 0xef, 0x19, - 0x07, 0xeb, 0x78, 0x10, 0xf4, 0xf0, 0xba, 0xe1, 0x10, 0x8f, 0x84, 0x98, 0x13, 0x5b, 0x0f, 0x42, - 0x9f, 0xfb, 0x70, 0x2d, 0x75, 0xd5, 0x71, 0x40, 0xf5, 0xb1, 0xae, 0x7a, 0xe6, 0xba, 0xf2, 0x9e, - 0x43, 0x79, 0x2f, 0xda, 0xd3, 0x2d, 0xdf, 0x35, 0x1c, 0xdf, 0xf1, 0x0d, 0x89, 0xb0, 0x17, 0xed, - 0xcb, 0x93, 0x3c, 0xc8, 0x5f, 0x29, 0xf2, 0xca, 0x9d, 0x29, 0x82, 0x1a, 0x0e, 0x67, 0xe5, 0x6e, - 0xe1, 0xe4, 0x62, 0xab, 0x47, 0x3d, 0x12, 0x1e, 0x19, 0x41, 0xdf, 0x11, 0x02, 0x66, 0xb8, 0x84, - 0xe3, 0x71, 0x5e, 0xc6, 0x24, 0xaf, 0x30, 0xf2, 0x38, 0x75, 0xc9, 0x88, 0xc3, 0x87, 0xe7, 0x39, - 0x30, 0xab, 0x47, 0x5c, 0x3c, 0xec, 0xd7, 0xfd, 0xad, 0x06, 0x16, 0xb6, 0x30, 0xb7, 0x7a, 0x88, - 0x30, 0x3f, 0x0a, 0x2d, 0xc2, 0xe0, 0x21, 0x58, 0xf2, 0xb0, 0x4b, 0x58, 0x80, 0x2d, 0xb2, 0x4b, - 0x06, 0xc4, 0xe2, 0x7e, 0xd8, 0xd4, 0x3a, 0xda, 0x6a, 0x63, 0xe3, 0x8e, 0x5e, 0x14, 0x37, 0xa7, - 0xd1, 0x83, 0xbe, 0x23, 0x04, 0x4c, 0x17, 0xd9, 0xe8, 0x07, 0xeb, 0xfa, 0x26, 0xde, 0x23, 0x83, - 0xcc, 0xd5, 0xbc, 0x95, 0xc4, 0xed, 0xa5, 0x27, 0xc3, 0x88, 0x68, 0x94, 0x04, 0xfa, 0x60, 0xc1, - 0xdf, 0x7b, 0x41, 0x2c, 0x9e, 0xd3, 0x5e, 0x79, 0x75, 0x5a, 0x98, 0xc4, 0xed, 0x85, 0xed, 0x0a, - 0x1c, 0x1a, 0x82, 0x87, 0xdf, 0x81, 0xf9, 0x50, 0xe5, 0x8d, 0xa2, 0x01, 0x61, 0xcd, 0xd9, 0xce, - 0xec, 0x6a, 0x63, 0xc3, 0xd4, 0xa7, 0xee, 0x21, 0x5d, 0x24, 0x66, 0x0b, 0xe7, 0x67, 0x94, 0xf7, - 0xb6, 0x03, 0x92, 0xea, 0x99, 0x79, 0xeb, 
0x38, 0x6e, 0xcf, 0x24, 0x71, 0x7b, 0x1e, 0x95, 0x09, - 0x50, 0x95, 0x0f, 0xfe, 0xa4, 0x81, 0x65, 0x72, 0x68, 0x0d, 0x22, 0x9b, 0x54, 0xec, 0x9a, 0xb5, - 0x4b, 0x0b, 0xe4, 0x6d, 0x15, 0xc8, 0xf2, 0xc3, 0x31, 0x3c, 0x68, 0x2c, 0x3b, 0x7c, 0x00, 0x1a, - 0xae, 0x68, 0x8a, 0x1d, 0x7f, 0x40, 0xad, 0xa3, 0xe6, 0xf5, 0x8e, 0xb6, 0x5a, 0x37, 0xbb, 0x49, - 0xdc, 0x6e, 0x6c, 0x15, 0xe2, 0xb3, 0xb8, 0x7d, 0xb3, 0x74, 0xfc, 0xe2, 0x28, 0x20, 0xa8, 0xec, - 0xd6, 0x3d, 0xd1, 0xc0, 0x9b, 0x13, 0xa2, 0x82, 0xf7, 0x8a, 0xca, 0xcb, 0xd6, 0x68, 0x6a, 0x9d, - 0xd9, 0xd5, 0xba, 0xb9, 0x54, 0xae, 0x98, 0x54, 0xa0, 0xaa, 0x1d, 0xfc, 0x41, 0x03, 0x30, 0x1c, - 0xc1, 0x53, 0x8d, 0x72, 0x6f, 0x9a, 0x7a, 0xe9, 0x63, 0x8a, 0xb4, 0xa2, 0x8a, 0x04, 0x47, 0x75, - 0x68, 0x0c, 0x5d, 0x17, 0x83, 0xfa, 0x0e, 0x0e, 0xb1, 0xfb, 0x98, 0x7a, 0x36, 0xdc, 0x00, 0x00, - 0x07, 0xf4, 0x29, 0x09, 0x05, 0x99, 0x9c, 0x94, 0xba, 0x09, 0x15, 0x20, 0xb8, 0xbf, 0xf3, 0x48, - 0x69, 0x50, 0xc9, 0x0a, 0x76, 0x40, 0xad, 0x4f, 0x3d, 0x5b, 0xc6, 0x5d, 0x37, 0x6f, 0x28, 0xeb, - 0x9a, 0xc0, 0x43, 0x52, 0xd3, 0x7d, 0x0e, 0xe6, 0x24, 0x05, 0x22, 0xfb, 0xc2, 0x5a, 0x4c, 0x8b, - 0xc2, 0xce, 0xad, 0x45, 0x45, 0x90, 0xd4, 0x40, 0x03, 0xd4, 0xf3, 0x79, 0x52, 0xa0, 0x4b, 0xca, - 0xac, 0x9e, 0xcf, 0x1e, 0x2a, 0x6c, 0xba, 0x7f, 0x69, 0xe0, 0xad, 0xa7, 0x78, 0x40, 0x6d, 0xcc, - 0xa9, 0xe7, 0xdc, 0xcf, 0x6a, 0x95, 0x5e, 0x1d, 0xfc, 0x1a, 0xcc, 0x89, 0xa9, 0xb2, 0x31, 0xc7, - 0x6a, 0xf4, 0xdf, 0x9f, 0x6e, 0x06, 0xd3, 0x81, 0xdb, 0x22, 0x1c, 0x17, 0x25, 0x28, 0x64, 0x28, - 0x47, 0x85, 0x2f, 0x40, 0x8d, 0x05, 0xc4, 0x52, 0x17, 0xf7, 0xf9, 0x05, 0x1a, 0x7d, 0x62, 0xd4, - 0xbb, 0x01, 0xb1, 0x8a, 0xe2, 0x88, 0x13, 0x92, 0x1c, 0xdd, 0x7f, 0x34, 0xd0, 0x99, 0xe8, 0x65, - 0x52, 0xcf, 0xa6, 0x9e, 0xf3, 0x1a, 0x52, 0xfe, 0xa6, 0x92, 0xf2, 0xf6, 0x65, 0xa4, 0xac, 0x82, - 0x9f, 0x98, 0xf9, 0xdf, 0x1a, 0xb8, 0x7d, 0x9e, 0xf3, 0x26, 0x65, 0x1c, 0x7e, 0x35, 0x92, 0xbd, - 0x3e, 0xe5, 0x47, 0x97, 0xb2, 0x34, 0xf7, 0x45, 0x45, 0x3f, 0x97, 0x49, 0x4a, 0x99, 0x07, 0xe0, - 0x2a, 0xe5, 0xc4, 0x15, 0x63, 0x2a, 0x3e, 0x6b, 0x8f, 0x2f, 0x31, 0x75, 0x73, 0x5e, 0xf1, 0x5e, - 0x7d, 0x24, 0x18, 0x50, 0x4a, 0xd4, 0xfd, 0xf9, 0xca, 0xf9, 0x89, 0x8b, 0x3a, 0x89, 0xe1, 0x0d, - 0xa4, 0xf0, 0x49, 0x31, 0x60, 0xf9, 0x35, 0xee, 0xe4, 0x1a, 0x54, 0xb2, 0x82, 0xcf, 0xc1, 0x5c, - 0xa0, 0x46, 0x73, 0xcc, 0x86, 0x3a, 0x2f, 0xa3, 0x6c, 0xaa, 0xcd, 0x1b, 0xa2, 0x5a, 0xd9, 0x09, - 0xe5, 0x90, 0x30, 0x02, 0x0b, 0x6e, 0x65, 0x25, 0x37, 0x67, 0x25, 0xc9, 0xc7, 0x17, 0x20, 0xa9, - 0xee, 0xf4, 0x74, 0x19, 0x56, 0x65, 0x68, 0x88, 0xa4, 0xfb, 0xa7, 0x06, 0xde, 0x99, 0x58, 0xb2, - 0xd7, 0xd0, 0x24, 0xb4, 0xda, 0x24, 0x0f, 0x2e, 0xa5, 0x49, 0xc6, 0x77, 0xc7, 0xaf, 0xb3, 0xff, - 0x91, 0xaa, 0x6c, 0x0b, 0x0c, 0xea, 0x41, 0xf6, 0x81, 0x57, 0xb9, 0xde, 0xbd, 0xe8, 0x1d, 0x0b, - 0x5f, 0x73, 0x5e, 0x7c, 0x81, 0xf3, 0x23, 0x2a, 0x50, 0xe1, 0xb7, 0x60, 0x51, 0xde, 0xc0, 0xa7, - 0xbe, 0x27, 0x00, 0xa8, 0xc7, 0xb3, 0x35, 0xf6, 0x3f, 0x2e, 0x7a, 0x39, 0x89, 0xdb, 0x8b, 0x5b, - 0x43, 0xb0, 0x68, 0x84, 0x08, 0x0e, 0x40, 0xe3, 0x40, 0x15, 0x40, 0xac, 0xcf, 0xf4, 0xdd, 0xf3, - 0xc1, 0x2b, 0x94, 0xdc, 0xf7, 0xcc, 0x37, 0x54, 0x8d, 0x1b, 0x85, 0x8c, 0xa1, 0x32, 0x3c, 0xdc, - 0x04, 0xf3, 0xfb, 0x98, 0x0e, 0xa2, 0x90, 0xa8, 0x17, 0x45, 0x4d, 0xce, 0xd9, 0xbb, 0x62, 0xdb, - 0x7f, 0x56, 0x56, 0x9c, 0xc5, 0xed, 0xa5, 0x8a, 0x40, 0xbe, 0x2a, 0xaa, 0xce, 0xdd, 0x5f, 0x34, - 0x00, 0x0a, 0x2a, 0x78, 0x1b, 0x80, 0x87, 0x87, 0x41, 0x48, 0x58, 0x69, 0xfd, 0xd6, 0x44, 0x48, - 0xa8, 0x24, 0x87, 0x6b, 0xe0, 0xba, 0x4b, 0x18, 0xc3, 0x4e, 0xb6, 
0x1e, 0x6f, 0xaa, 0xa8, 0xaf, - 0x6f, 0xa5, 0x62, 0x94, 0xe9, 0xe1, 0x33, 0x70, 0x2d, 0x24, 0x98, 0xf9, 0x9e, 0x9c, 0xbb, 0xba, - 0xf9, 0x49, 0x12, 0xb7, 0xaf, 0x21, 0x29, 0x39, 0x8b, 0xdb, 0xeb, 0xd3, 0x3c, 0xe8, 0xf5, 0x5d, - 0x8e, 0x79, 0xc4, 0x52, 0x27, 0xa4, 0xe0, 0xcc, 0xed, 0xe3, 0xd3, 0xd6, 0xcc, 0xcb, 0xd3, 0xd6, - 0xcc, 0xc9, 0x69, 0x6b, 0xe6, 0xfb, 0xa4, 0xa5, 0x1d, 0x27, 0x2d, 0xed, 0x65, 0xd2, 0xd2, 0x4e, - 0x92, 0x96, 0xf6, 0x7b, 0xd2, 0xd2, 0x7e, 0xfc, 0xa3, 0x35, 0xf3, 0xe5, 0xda, 0xd4, 0xff, 0x7d, - 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x20, 0xc8, 0x63, 0x1d, 0x40, 0x0d, 0x00, 0x00, + // 1407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xcf, 0xc6, 0x4e, 0x9a, 0x8c, 0xf3, 0xb0, 0x87, 0x56, 0x75, 0x23, 0x6a, 0x47, 0xab, 0x0a, + 0x35, 0x12, 0xec, 0x92, 0xb4, 0x50, 0x40, 0x48, 0x28, 0xdb, 0x17, 0x7d, 0xa4, 0x89, 0xa6, 0x28, + 0x91, 0x10, 0x95, 0x98, 0xec, 0x4e, 0xec, 0xa9, 0xbd, 0x0f, 0x76, 0xd6, 0xa1, 0x11, 0x48, 0x54, + 0xe2, 0x02, 0x37, 0x0e, 0x5c, 0xf8, 0x5f, 0xb8, 0x70, 0xeb, 0xb1, 0xc7, 0x72, 0xc0, 0x22, 0xe6, + 0xc2, 0x5f, 0x00, 0x52, 0x2e, 0xa0, 0x99, 0x9d, 0x7d, 0x3b, 0xc4, 0x2e, 0x81, 0x9b, 0xf7, 0x7b, + 0xfc, 0x7e, 0xf3, 0x7d, 0xf3, 0x7d, 0x33, 0xdf, 0x18, 0xa0, 0xce, 0x3b, 0x4c, 0xa3, 0xae, 0xde, + 0xe9, 0xed, 0x12, 0xdf, 0x21, 0x01, 0x61, 0xfa, 0x3e, 0x71, 0x2c, 0xd7, 0xd7, 0xa5, 0x02, 0x7b, + 0x54, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x16, 0x65, 0x81, 0x8f, 0x03, 0xea, 0x3a, + 0xfa, 0xfe, 0x2a, 0xee, 0x7a, 0x6d, 0xbc, 0xaa, 0xb7, 0x88, 0x43, 0x7c, 0x1c, 0x10, 0x4b, 0xf3, + 0x7c, 0x37, 0x70, 0xe1, 0x4a, 0xe8, 0xaa, 0x61, 0x8f, 0x6a, 0x43, 0x5d, 0xb5, 0xc8, 0x75, 0xe9, + 0x8d, 0x16, 0x0d, 0xda, 0xbd, 0x5d, 0xcd, 0x74, 0x6d, 0xbd, 0xe5, 0xb6, 0x5c, 0x5d, 0x20, 0xec, + 0xf6, 0xf6, 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x42, 0xe4, 0xa5, 0x2b, 0x23, 0x2c, 0x2a, 0xbf, 0x9c, + 0xa5, 0xab, 0x89, 0x93, 0x8d, 0xcd, 0x36, 0x75, 0x88, 0x7f, 0xa0, 0x7b, 0x9d, 0x16, 0x17, 0x30, + 0xdd, 0x26, 0x01, 0x1e, 0xe6, 0xa5, 0x1f, 0xe7, 0xe5, 0xf7, 0x9c, 0x80, 0xda, 0xa4, 0xe0, 0xf0, + 0xf6, 0x49, 0x0e, 0xcc, 0x6c, 0x13, 0x1b, 0xe7, 0xfd, 0x54, 0x06, 0x16, 0xd7, 0x7b, 0x16, 0x0d, + 0xd6, 0x1d, 0xc7, 0x0d, 0x44, 0x10, 0xf0, 0x22, 0x28, 0x75, 0xc8, 0x41, 0x5d, 0x59, 0x56, 0x2e, + 0xcf, 0x1a, 0x95, 0x67, 0xfd, 0xe6, 0xc4, 0xa0, 0xdf, 0x2c, 0xdd, 0x23, 0x07, 0x88, 0xcb, 0xe1, + 0x3a, 0x58, 0xdc, 0xc7, 0xdd, 0x1e, 0xb9, 0xf9, 0xc4, 0xf3, 0x89, 0x48, 0x41, 0x7d, 0x52, 0x98, + 0x9e, 0x97, 0xa6, 0x8b, 0xdb, 0x59, 0x35, 0xca, 0xdb, 0xab, 0x5d, 0x50, 0x4b, 0xbe, 0x76, 0xb0, + 0xef, 0x50, 0xa7, 0x05, 0x5f, 0x07, 0x33, 0x7b, 0x94, 0x74, 0x2d, 0x44, 0xf6, 0x24, 0x60, 0x55, + 0x02, 0xce, 0xdc, 0x92, 0x72, 0x14, 0x5b, 0xc0, 0x15, 0x70, 0xe6, 0xf3, 0xd0, 0xb1, 0x5e, 0x12, + 0xc6, 0x8b, 0xd2, 0xf8, 0x8c, 0xc4, 0x43, 0x91, 0x5e, 0xdd, 0x03, 0x0b, 0x1b, 0x38, 0x30, 0xdb, + 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x65, 0x50, 0x76, 0xb0, 0x4d, 0x64, 0x88, 0x73, 0xd2, 0xb3, + 0xfc, 0x00, 0xdb, 0x04, 0x09, 0x0d, 0x5c, 0x03, 0x80, 0xe4, 0xe3, 0x83, 0xd2, 0x0e, 0xa4, 0x42, + 0x4b, 0x59, 0xa9, 0x3f, 0x97, 0x25, 0x11, 0x22, 0xcc, 0xed, 0xf9, 0x26, 0x61, 0xf0, 0x09, 0xa8, + 0x71, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x24, 0x5d, 0x62, 0x06, 0xae, 0x2f, 0x58, 0x2b, 0x6b, 0x57, + 0xb4, 0xa4, 0x4e, 0xe3, 0x1d, 0xd3, 0xbc, 0x4e, 0x8b, 0x0b, 0x98, 0xc6, 0x0b, 0x43, 0xdb, 0x5f, + 0xd5, 0xee, 0xe3, 0x5d, 0xd2, 0x8d, 0x5c, 0x8d, 0x73, 0x83, 0x7e, 0xb3, 0xf6, 0x20, 0x8f, 0x88, + 0x8a, 0x24, 0xd0, 0x05, 0x0b, 0xee, 0xee, 0x63, 0x62, 
0x06, 0x31, 0xed, 0xe4, 0xcb, 0xd3, 0xc2, + 0x41, 0xbf, 0xb9, 0xb0, 0x99, 0x81, 0x43, 0x39, 0x78, 0xf8, 0x15, 0x98, 0xf7, 0x65, 0xdc, 0xa8, + 0xd7, 0x25, 0xac, 0x5e, 0x5a, 0x2e, 0x5d, 0xae, 0xac, 0x19, 0xda, 0xc8, 0xed, 0xa8, 0xf1, 0xc0, + 0x2c, 0xee, 0xbc, 0x43, 0x83, 0xf6, 0xa6, 0x47, 0x42, 0x3d, 0x33, 0xce, 0xc9, 0xc4, 0xcf, 0xa3, + 0x34, 0x01, 0xca, 0xf2, 0xc1, 0xef, 0x15, 0x70, 0x96, 0x3c, 0x31, 0xbb, 0x3d, 0x8b, 0x64, 0xec, + 0xea, 0xe5, 0x53, 0x5b, 0xc8, 0xab, 0x72, 0x21, 0x67, 0x6f, 0x0e, 0xe1, 0x41, 0x43, 0xd9, 0xe1, + 0x0d, 0x50, 0xb1, 0x79, 0x51, 0x6c, 0xb9, 0x5d, 0x6a, 0x1e, 0xd4, 0xcf, 0x88, 0x52, 0x52, 0x07, + 0xfd, 0x66, 0x65, 0x23, 0x11, 0x1f, 0xf5, 0x9b, 0x8b, 0xa9, 0xcf, 0x8f, 0x0e, 0x3c, 0x82, 0xd2, + 0x6e, 0xea, 0x0b, 0x05, 0x9c, 0x3f, 0x66, 0x55, 0xf0, 0x5a, 0x92, 0x79, 0x51, 0x1a, 0x75, 0x65, + 0xb9, 0x74, 0x79, 0xd6, 0xa8, 0xa5, 0x33, 0x26, 0x14, 0x28, 0x6b, 0x07, 0xbf, 0x56, 0x00, 0xf4, + 0x0b, 0x78, 0xb2, 0x50, 0xae, 0x8d, 0x92, 0x2f, 0x6d, 0x48, 0x92, 0x96, 0x64, 0x92, 0x60, 0x51, + 0x87, 0x86, 0xd0, 0xa9, 0x18, 0xcc, 0x6e, 0x61, 0x1f, 0xdb, 0xf7, 0xa8, 0x63, 0xf1, 0xbe, 0xc3, + 0x1e, 0xdd, 0x26, 0xbe, 0xe8, 0x3b, 0x25, 0xdb, 0x77, 0xeb, 0x5b, 0x77, 0xa4, 0x06, 0xa5, 0xac, + 0x78, 0x37, 0x77, 0xa8, 0x63, 0xc9, 0x2e, 0x8d, 0xbb, 0x99, 0xe3, 0x21, 0xa1, 0x51, 0x1f, 0x81, + 0x19, 0x41, 0xc1, 0x0f, 0x8e, 0x93, 0x7b, 0x5f, 0x07, 0xb3, 0x71, 0x3f, 0x49, 0xd0, 0x9a, 0x34, + 0x9b, 0x8d, 0x7b, 0x0f, 0x25, 0x36, 0xea, 0x0f, 0x0a, 0x98, 0xe3, 0x5b, 0x76, 0xbd, 0x4d, 0xcc, + 0x0e, 0x3f, 0xca, 0xbe, 0x51, 0x00, 0x24, 0xf9, 0x03, 0x2e, 0xdc, 0x97, 0xca, 0xda, 0xfb, 0x63, + 0x14, 0x62, 0xe1, 0x94, 0x4c, 0xb2, 0x5b, 0x50, 0x31, 0x34, 0x84, 0x53, 0xfd, 0x65, 0x12, 0x5c, + 0xd8, 0xc6, 0x5d, 0x6a, 0xe1, 0x80, 0x3a, 0xad, 0xf5, 0x88, 0x2e, 0x2c, 0x2b, 0xf8, 0x29, 0x98, + 0xe1, 0x1d, 0x6f, 0xe1, 0x00, 0xcb, 0x63, 0xe9, 0xcd, 0xd1, 0xce, 0x87, 0xf0, 0x30, 0xd8, 0x20, + 0x01, 0x4e, 0xb6, 0x27, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x06, 0x65, 0xe6, 0x11, 0x53, 0x16, 0xd5, + 0x87, 0x63, 0xc4, 0x7e, 0xec, 0xaa, 0x1f, 0x7a, 0xc4, 0x4c, 0x36, 0x8e, 0x7f, 0x21, 0xc1, 0x01, + 0x7d, 0x30, 0xcd, 0x02, 0x1c, 0xf4, 0x98, 0xb8, 0x12, 0x2a, 0x6b, 0x77, 0x4f, 0x85, 0x4d, 0x20, + 0x1a, 0x0b, 0x92, 0x6f, 0x3a, 0xfc, 0x46, 0x92, 0x49, 0xfd, 0x53, 0x01, 0xcb, 0xc7, 0xfa, 0x1a, + 0xd4, 0xb1, 0x78, 0x3d, 0xfc, 0xf7, 0x69, 0xfe, 0x2c, 0x93, 0xe6, 0xcd, 0xd3, 0x08, 0x5c, 0x2e, + 0xfe, 0xb8, 0x6c, 0xab, 0x7f, 0x28, 0xe0, 0xd2, 0x49, 0xce, 0xf7, 0x29, 0x0b, 0xe0, 0x27, 0x85, + 0xe8, 0xb5, 0x11, 0x2f, 0x21, 0xca, 0xc2, 0xd8, 0xe3, 0x41, 0x20, 0x92, 0xa4, 0x22, 0xf7, 0xc0, + 0x14, 0x0d, 0x88, 0xcd, 0x8f, 0x2d, 0xde, 0x5d, 0xf7, 0x4e, 0x31, 0x74, 0x63, 0x5e, 0xf2, 0x4e, + 0xdd, 0xe1, 0x0c, 0x28, 0x24, 0x52, 0xbf, 0x2d, 0x9d, 0x1c, 0x38, 0xcf, 0x13, 0x3f, 0xcc, 0x3c, + 0x21, 0x7c, 0x90, 0x1c, 0x38, 0xf1, 0x36, 0x6e, 0xc5, 0x1a, 0x94, 0xb2, 0x82, 0x8f, 0xc0, 0x8c, + 0x27, 0x8f, 0xaa, 0x21, 0x37, 0xf6, 0x49, 0x11, 0x45, 0xa7, 0x9c, 0x31, 0xc7, 0xb3, 0x15, 0x7d, + 0xa1, 0x18, 0x12, 0xf6, 0xc0, 0x82, 0x9d, 0x19, 0x51, 0x64, 0xab, 0xbc, 0x3b, 0x06, 0x49, 0x76, + 0xc6, 0x09, 0x87, 0x83, 0xac, 0x0c, 0xe5, 0x48, 0xe0, 0x0e, 0xa8, 0xed, 0xcb, 0x8c, 0xb9, 0xce, + 0xba, 0x19, 0xde, 0x33, 0x65, 0x71, 0x4d, 0xad, 0xf0, 0x91, 0x66, 0x3b, 0xaf, 0x3c, 0xea, 0x37, + 0xab, 0x79, 0x21, 0x2a, 0x62, 0xa8, 0xbf, 0x2b, 0xe0, 0xe2, 0xb1, 0x7b, 0xf1, 0x3f, 0x54, 0x1f, + 0xcd, 0x56, 0xdf, 0x8d, 0x53, 0xa9, 0xbe, 0xe1, 0x65, 0xf7, 0xe3, 0xd4, 0x3f, 0x84, 0x2a, 0xea, + 0x0d, 0x83, 0x59, 0x2f, 0xba, 0x49, 0x65, 0xac, 0x57, 0xc7, 0x2d, 0x1e, 0xee, 
0x6b, 0xcc, 0xf3, + 0xab, 0x2e, 0xfe, 0x44, 0x09, 0x2a, 0xfc, 0x02, 0x54, 0x6d, 0x39, 0x4b, 0x73, 0x00, 0xea, 0x04, + 0xd1, 0xbc, 0xf0, 0x2f, 0x2a, 0xe8, 0xec, 0xa0, 0xdf, 0xac, 0x6e, 0xe4, 0x60, 0x51, 0x81, 0x08, + 0x76, 0x41, 0x25, 0xa9, 0x80, 0x68, 0xc0, 0x7c, 0xeb, 0x25, 0x52, 0xee, 0x3a, 0xc6, 0x2b, 0x32, + 0xc7, 0x95, 0x44, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0xf3, 0x7b, 0x98, 0x76, 0x7b, 0x3e, 0x91, + 0xa3, 0x5b, 0x59, 0x34, 0xf0, 0x6b, 0x7c, 0xac, 0xba, 0x95, 0x56, 0x1c, 0xf5, 0x9b, 0xb5, 0x8c, + 0x40, 0x8c, 0x6f, 0x59, 0x67, 0xf8, 0x54, 0x01, 0x55, 0x9c, 0x7d, 0x68, 0xb1, 0xfa, 0x94, 0x88, + 0xe0, 0xbd, 0x31, 0x22, 0xc8, 0xbd, 0xd5, 0x8c, 0xba, 0x0c, 0xa3, 0x9a, 0x53, 0x30, 0x54, 0x60, + 0x83, 0x5f, 0x82, 0x45, 0x3b, 0xf3, 0x0e, 0x62, 0xf5, 0x69, 0xb1, 0x80, 0xb1, 0xb7, 0x2e, 0x46, + 0x48, 0xde, 0x7c, 0x59, 0x39, 0x43, 0x79, 0x2a, 0xf5, 0xa7, 0x49, 0xd0, 0x3c, 0xe1, 0x92, 0x85, + 0x77, 0x01, 0x74, 0x77, 0x19, 0xf1, 0xf7, 0x89, 0x75, 0x3b, 0x7c, 0xa7, 0x46, 0x53, 0x60, 0x29, + 0x19, 0x7c, 0x36, 0x0b, 0x16, 0x68, 0x88, 0x17, 0xb4, 0xc1, 0x5c, 0x90, 0x9a, 0xc9, 0xc6, 0x99, + 0x6a, 0x65, 0xa8, 0xe9, 0x91, 0xce, 0xa8, 0x0e, 0xfa, 0xcd, 0xcc, 0x90, 0x87, 0x32, 0xf0, 0xd0, + 0x04, 0xc0, 0x4c, 0xf2, 0x1a, 0x96, 0xa6, 0x3e, 0xda, 0x41, 0x93, 0x64, 0x33, 0xbe, 0x1c, 0x52, + 0x89, 0x4c, 0xc1, 0xaa, 0x7f, 0x29, 0x00, 0x24, 0xf5, 0x0a, 0x2f, 0x81, 0xd4, 0x53, 0x54, 0xde, + 0x2f, 0x65, 0x0e, 0x81, 0x52, 0x72, 0xfe, 0x52, 0xb6, 0x09, 0x63, 0xb8, 0x15, 0x0d, 0xb3, 0xf1, + 0x4b, 0x79, 0x23, 0x14, 0xa3, 0x48, 0x0f, 0x77, 0xc0, 0xb4, 0x4f, 0x30, 0x73, 0x1d, 0xf9, 0xa6, + 0xfe, 0x80, 0x0f, 0x3c, 0x48, 0x48, 0x8e, 0xfa, 0xcd, 0xd5, 0x51, 0xfe, 0xc9, 0xd0, 0xe4, 0x7c, + 0x24, 0x9c, 0x90, 0x84, 0x83, 0xb7, 0x41, 0x4d, 0x72, 0xa4, 0x16, 0x1c, 0xf6, 0xd3, 0x05, 0xb9, + 0x9a, 0xda, 0x46, 0xde, 0x00, 0x15, 0x7d, 0x8c, 0xcd, 0x67, 0x87, 0x8d, 0x89, 0xe7, 0x87, 0x8d, + 0x89, 0x17, 0x87, 0x8d, 0x89, 0xa7, 0x83, 0x86, 0xf2, 0x6c, 0xd0, 0x50, 0x9e, 0x0f, 0x1a, 0xca, + 0x8b, 0x41, 0x43, 0xf9, 0x75, 0xd0, 0x50, 0xbe, 0xfb, 0xad, 0x31, 0xf1, 0xf1, 0xca, 0xc8, 0xff, + 0x1e, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x08, 0xaf, 0xaa, 0x52, 0x82, 0x12, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MatchResources) Marshal() (dAtA []byte, err error) { @@ -631,6 +897,43 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -651,6 +954,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -784,6 +1097,15 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) _ = i var l int _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if m.MatchResources != nil { { size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) @@ -883,6 +1205,34 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.FailurePolicy != nil { i -= len(*m.FailurePolicy) copy(dAtA[i:], *m.FailurePolicy) @@ -931,7 +1281,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -941,27 +1291,84 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 i -= len(m.Expression) copy(dAtA[i:], m.Expression) @@ -982,6 +1389,45 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MatchResources) Size() (n int) { if m == nil { return 0 @@ -1058,6 +1504,21 @@ func (m *ParamRef) Size() (n int) { return n } +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ValidatingAdmissionPolicy) Size() (n int) { if m == nil { return 0 @@ -1068,6 +1529,8 @@ func (m *ValidatingAdmissionPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1117,6 +1580,12 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { l = m.MatchResources.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1161,6 +1630,38 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1178,6 +1679,8 @@ func (m *Validation) Size() (n int) { l = len(*m.Reason) n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1187,6 +1690,39 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MatchResources) String() string { if this == nil { return "nil" @@ -1244,6 +1780,21 @@ func (this *ParamRef) String() string { }, "") return s } +func (this *TypeChecking) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} func (this *ValidatingAdmissionPolicy) String() string { if this == nil { return "nil" @@ -1251,6 +1802,7 @@ func (this *ValidatingAdmissionPolicy) String() string { s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1290,6 +1842,7 @@ func (this *ValidatingAdmissionPolicyBindingSpec) String() string { `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, `}`, }, "") return s @@ -1314,39 +1867,411 @@ func (this *ValidatingAdmissionPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - repeatedStringForValidations += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s + return nil } -func (this *Validation) String() string { - if this == nil { - return "nil" +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *MatchResources) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -1914,6 +2839,90 @@ func (m *ParamRef) Unmarshal(dAtA []byte) error { } return nil } +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1945,7 +2954,40 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1972,13 +3014,13 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2005,7 +3047,7 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2396,6 +3438,38 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2702,6 
+3776,213 @@ func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { s := FailurePolicyType(dAtA[iNdEx:postIndex]) m.FailurePolicy = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2849,6 +4130,38 @@ func (m *Validation) Unmarshal(dAtA []byte) error { s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) m.Reason = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index fe8236cd3..c718c5464 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,6 +29,84 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1alpha1"; +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. 
+ // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MatchResources decides whether to run the admission control policy on an object based // on whether it meets the match criteria. 
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) @@ -161,6 +239,15 @@ message ParamRef { optional string namespace = 2; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -169,6 +256,13 @@ message ValidatingAdmissionPolicy { // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; } // ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. @@ -213,6 +307,48 @@ message ValidatingAdmissionPolicyBindingSpec { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. 
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; } // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. @@ -243,30 +379,91 @@ message ValidatingAdmissionPolicySpec { optional MatchResources matchConstraints = 2; // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. + // +optional repeated Validation validations = 3; - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. 
+ // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. message Validation { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - // 'object' - The object from the incoming request. The value is null for DELETE requests. - // 'oldObject' - The existing object. The value is null for CREATE requests. - // 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - // 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -313,5 +510,18 @@ message Validation { // If not set, StatusReasonInvalid is used in the response to the client. // +optional optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index b64bc628f..2bbb55a47 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -74,6 +74,49 @@ type ValidatingAdmissionPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the ValidatingAdmissionPolicy. Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. 
+ // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -107,20 +150,61 @@ type ValidatingAdmissionPolicySpec struct { MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. - Validations []Validation `json:"validations" protobuf:"bytes,3,rep,name=validations"` + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` } +type MatchCondition v1.MatchCondition + // ParamKind is a tuple of Group Kind and Version. // +structType=atomic type ParamKind struct { @@ -138,12 +222,16 @@ type ParamKind struct { type Validation struct { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - //'object' - The object from the incoming request. The value is null for DELETE requests. - //'oldObject' - The existing object. The value is null for CREATE requests. - //'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - //'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -188,6 +276,55 @@ type Validation struct { // If not set, StatusReasonInvalid is used in the response to the client. // +optional Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. 
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` } // +genclient @@ -240,6 +377,48 @@ type ValidatingAdmissionPolicyBindingSpec struct { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. 
+ // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` } // ParamRef references a parameter resource @@ -344,6 +523,24 @@ type MatchResources struct { MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` } +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic type NamedRuleWithOperations struct { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index a670bb206..b3cac1821 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -24,9 +24,29 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + var map_MatchResources = map[string]string{ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", @@ -69,10 +89,20 @@ func (ParamRef) SwaggerDoc() map[string]string { return map_ParamRef } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + var map_ValidatingAdmissionPolicy = map[string]string{ "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", } func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { @@ -100,10 +130,11 @@ func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { } var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ - "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", - "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", - "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", - "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. 
Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", } func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { @@ -124,19 +155,33 @@ var map_ValidatingAdmissionPolicySpec = map[string]string{ "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", - "validations": "Validations contain CEL expressions which is used to apply the validation. A minimum of one validation is required for a policy definition. Required.", - "failurePolicy": "FailurePolicy defines how to handle failures for the admission policy. Failures can occur from invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. Allowed values are Ignore or Fail. Defaults to Fail.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", } func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { return map_ValidatingAdmissionPolicySpec } +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_Validation = map[string]string{ - "": "Validation specifies the CEL expression which is used to apply the validation.", - "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", - "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", - "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. 
Example: \"object.x must be less than max (\"+string(params.max)+\")\"", } func (Validation) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 4f29ac7a9..8e4abfd08 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchResources) DeepCopyInto(out *MatchResources) { *out = *in @@ -125,12 +173,34 @@ func (in *ParamRef) DeepCopy() *ParamRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -225,6 +295,11 @@ func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmi *out = new(MatchResources) (*in).DeepCopyInto(*out) } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } return } @@ -296,6 +371,16 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPo *out = new(FailurePolicyType) **out = **in } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -309,6 +394,34 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySp return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Validation) DeepCopyInto(out *Validation) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 56a9f10e5..8fb354c31 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -45,10 +45,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +104,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +132,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_abeea74cbc46f55a, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +160,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} + return fileDescriptor_abeea74cbc46f55a, []int{4} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +188,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} + return fileDescriptor_abeea74cbc46f55a, []int{5} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +216,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_abeea74cbc46f55a, 
[]int{6} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +244,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{7} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +272,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_abeea74cbc46f55a, []int{8} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,6 +298,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") @@ -285,68 +314,106 @@ func init() { } var fileDescriptor_abeea74cbc46f55a = []byte{ - // 974 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x49, 0x6f, 0xdb, 0x46, - 0x14, 0x36, 0x2d, 0x29, 0x92, 0x46, 0xb2, 0x13, 0x4d, 0x97, 0xb0, 0x6e, 0x40, 0x0a, 0x3a, 0x14, - 0xba, 0x94, 0x4c, 0x9c, 0xa2, 0x4b, 0x8a, 0x1e, 0x42, 0xb7, 0x41, 0x0b, 0xd8, 0x4e, 0x3a, 0xce, - 0x02, 0xb4, 0x29, 0x90, 0x11, 0xf5, 0x24, 0x4d, 0x45, 0x72, 0x04, 0xce, 0x50, 0xa9, 0x6f, 0xfd, - 0x09, 0xfd, 0x0b, 0xfd, 0x21, 0xbd, 0xf5, 0xe0, 0x63, 0x8e, 0xb9, 0x94, 0xa8, 0xd9, 0x5e, 0x7b, - 0xe8, 0xd5, 0xa7, 0x82, 0x8b, 0x76, 0x39, 0x21, 0x5c, 0x20, 0x27, 0xdf, 0x34, 0xdf, 0xe3, 0xf7, - 0xbd, 0x79, 0x6f, 0xde, 0x02, 0xa1, 0x6f, 0x87, 0x9f, 0x0a, 0x83, 0x71, 0x73, 0x18, 0x74, 0xc0, - 0xf7, 0x40, 0x82, 0x30, 0xc7, 0xe0, 0x75, 0xb9, 0x6f, 0x66, 0x06, 0x3a, 0x62, 0x26, 0xed, 0xba, - 0x4c, 0x08, 0xc6, 0x3d, 0x1f, 0xfa, 0x4c, 0x48, 0x9f, 0x4a, 0xc6, 0x3d, 0x73, 0x7c, 0xab, 0x03, - 0x92, 0xde, 0x32, 0xfb, 0xe0, 0x81, 0x4f, 0x25, 0x74, 0x8d, 0x91, 0xcf, 0x25, 0xc7, 0xed, 0x94, - 0x69, 0xd0, 0x11, 0x33, 0xd6, 0x32, 0x8d, 0x8c, 0xb9, 0xf3, 0x61, 0x9f, 0xc9, 0x41, 0xd0, 0x31, - 0x6c, 0xee, 0x9a, 0x7d, 0xde, 0xe7, 0x66, 0x22, 0xd0, 0x09, 0x7a, 0xc9, 0x29, 0x39, 0x24, 0xbf, - 0x52, 0xe1, 0x9d, 0xdb, 0x39, 0xae, 0xb4, 0x7c, 0x9b, 0x9d, 0x8f, 0x66, 0x24, 0x97, 0xda, 0x03, - 0xe6, 0x81, 0x7f, 0x6c, 0x8e, 0x86, 0xfd, 0x18, 0x10, 0xa6, 0x0b, 0x92, 0xae, 0x63, 0x99, 0xe7, - 0xb1, 0xfc, 0xc0, 0x93, 0xcc, 0x85, 0x15, 0xc2, 0xc7, 0xaf, 0x23, 0x08, 0x7b, 0x00, 0x2e, 0x5d, - 0xe6, 0xb5, 0x7e, 0x2f, 0xa3, 0xab, 0x07, 0x81, 0xa4, 0x92, 0x79, 0xfd, 0x27, 0xd0, 0x19, 0x70, - 0x3e, 0xc4, 0x4d, 0x54, 0xf4, 0xa8, 0x0b, 0xaa, 0xd2, 0x54, 0xda, 0x55, 0xab, 0x7e, 0x12, 0xea, - 0x1b, 0x51, 0xa8, 
0x17, 0x0f, 0xa9, 0x0b, 0x24, 0xb1, 0xe0, 0xe7, 0xa8, 0x6e, 0x3b, 0x0c, 0x3c, - 0xb9, 0xc7, 0xbd, 0x1e, 0xeb, 0xab, 0x9b, 0x4d, 0xa5, 0x5d, 0xdb, 0xfd, 0xc2, 0xc8, 0x9b, 0x79, - 0x23, 0x73, 0xb5, 0x37, 0x27, 0x62, 0xbd, 0x9d, 0x39, 0xaa, 0xcf, 0xa3, 0x64, 0xc1, 0x11, 0x7e, - 0x8a, 0x4a, 0x7e, 0xe0, 0x80, 0x50, 0x0b, 0xcd, 0x42, 0xbb, 0xb6, 0xfb, 0x49, 0x1e, 0x8f, 0x06, - 0x09, 0x1c, 0x78, 0xc2, 0xe4, 0xe0, 0xfe, 0x08, 0x52, 0x50, 0x58, 0x5b, 0x99, 0xaf, 0x52, 0x6c, - 0x13, 0x24, 0x15, 0xc5, 0xfb, 0x68, 0xab, 0x47, 0x99, 0x13, 0xf8, 0xf0, 0x80, 0x3b, 0xcc, 0x3e, - 0x56, 0x8b, 0x49, 0x06, 0x3e, 0x88, 0x42, 0x7d, 0xeb, 0xde, 0xbc, 0xe1, 0x2c, 0xd4, 0x1b, 0x0b, - 0xc0, 0xc3, 0xe3, 0x11, 0x90, 0x45, 0x32, 0xfe, 0x12, 0xd5, 0x5c, 0x2a, 0xed, 0x41, 0xa6, 0x55, - 0x4d, 0xb4, 0x5a, 0x51, 0xa8, 0xd7, 0x0e, 0x66, 0xf0, 0x59, 0xa8, 0x5f, 0x9d, 0x3b, 0x26, 0x3a, - 0xf3, 0x34, 0xfc, 0x13, 0x6a, 0xc4, 0x29, 0x17, 0x23, 0x6a, 0xc3, 0x11, 0x38, 0x60, 0x4b, 0xee, - 0xab, 0xa5, 0x24, 0xdf, 0xb7, 0xe7, 0xa2, 0x9f, 0x3e, 0xba, 0x31, 0x1a, 0xf6, 0x63, 0x40, 0x18, - 0x71, 0x6d, 0xc5, 0xe1, 0xef, 0xd3, 0x0e, 0x38, 0x13, 0xaa, 0xf5, 0x4e, 0x14, 0xea, 0x8d, 0xc3, - 0x65, 0x45, 0xb2, 0xea, 0x04, 0x73, 0xb4, 0xcd, 0x3b, 0x3f, 0x82, 0x2d, 0xa7, 0x6e, 0x6b, 0x17, - 0x77, 0x8b, 0xa3, 0x50, 0xdf, 0xbe, 0xbf, 0x20, 0x47, 0x96, 0xe4, 0xe3, 0x84, 0x09, 0xd6, 0x85, - 0xaf, 0x7a, 0x3d, 0xb0, 0xa5, 0x50, 0xaf, 0xcc, 0x12, 0x76, 0x34, 0x83, 0xe3, 0x84, 0xcd, 0x8e, - 0x7b, 0x0e, 0x15, 0x82, 0xcc, 0xd3, 0xf0, 0x1d, 0xb4, 0x1d, 0x17, 0x3c, 0x0f, 0xe4, 0x11, 0xd8, - 0xdc, 0xeb, 0x0a, 0xb5, 0xdc, 0x54, 0xda, 0xa5, 0xf4, 0x06, 0x0f, 0x17, 0x2c, 0x64, 0xe9, 0x4b, - 0xfc, 0x08, 0x5d, 0x9f, 0x56, 0x11, 0x81, 0x31, 0x83, 0xe7, 0x8f, 0xc1, 0x8f, 0x0f, 0x42, 0xad, - 0x34, 0x0b, 0xed, 0xaa, 0xf5, 0x7e, 0x14, 0xea, 0xd7, 0xef, 0xae, 0xff, 0x84, 0x9c, 0xc7, 0xc5, - 0xcf, 0x10, 0xf6, 0x81, 0x79, 0x63, 0x6e, 0x27, 0xe5, 0x97, 0x15, 0x04, 0x4a, 0xe2, 0xbb, 0x19, - 0x85, 0x3a, 0x26, 0x2b, 0xd6, 0xb3, 0x50, 0x7f, 0x77, 0x15, 0x4d, 0xca, 0x63, 0x8d, 0x56, 0xeb, - 0x0f, 0x05, 0xdd, 0x58, 0x6a, 0xe3, 0xb4, 0x63, 0x82, 0xb4, 0xe2, 0xf1, 0x33, 0x54, 0x89, 0x1f, - 0xa6, 0x4b, 0x25, 0x4d, 0xfa, 0xba, 0xb6, 0x7b, 0x33, 0xdf, 0x33, 0xa6, 0x6f, 0x76, 0x00, 0x92, - 0x5a, 0x38, 0x6b, 0x1a, 0x34, 0xc3, 0xc8, 0x54, 0x15, 0x7f, 0x8f, 0x2a, 0x99, 0x67, 0xa1, 0x6e, - 0x26, 0xdd, 0xf9, 0x59, 0xfe, 0x79, 0xb0, 0x74, 0x77, 0xab, 0x18, 0xbb, 0x22, 0x53, 0xc1, 0xd6, - 0x3f, 0x0a, 0x6a, 0xbe, 0x2a, 0xbe, 0x7d, 0x26, 0x24, 0x7e, 0xba, 0x12, 0xa3, 0x91, 0xb3, 0x54, - 0x99, 0x48, 0x23, 0xbc, 0x96, 0x45, 0x58, 0x99, 0x20, 0x73, 0xf1, 0x0d, 0x51, 0x89, 0x49, 0x70, - 0x27, 0xc1, 0xdd, 0xbb, 0x70, 0x70, 0x0b, 0x17, 0x9f, 0x4d, 0xa2, 0x6f, 0x62, 0x71, 0x92, 0xfa, - 0x68, 0xfd, 0xaa, 0xa0, 0x6b, 0x47, 0xe0, 0x8f, 0x99, 0x0d, 0x04, 0x7a, 0xe0, 0x83, 0x67, 0x03, - 0x36, 0x51, 0x75, 0xda, 0xa5, 0xd9, 0x70, 0x6e, 0x64, 0xec, 0xea, 0xb4, 0xa3, 0xc9, 0xec, 0x9b, - 0xe9, 0x20, 0xdf, 0x3c, 0x77, 0x90, 0xdf, 0x40, 0xc5, 0x11, 0x95, 0x03, 0xb5, 0x90, 0x7c, 0x51, - 0x89, 0xad, 0x0f, 0xa8, 0x1c, 0x90, 0x04, 0x4d, 0xac, 0xdc, 0x97, 0xc9, 0x18, 0x2c, 0x65, 0x56, - 0xee, 0x4b, 0x92, 0xa0, 0xad, 0xbf, 0xaf, 0xa0, 0xc6, 0x63, 0xea, 0xb0, 0xee, 0xe5, 0xf2, 0xb8, - 0x5c, 0x1e, 0xaf, 0x5f, 0x1e, 0xe8, 0x72, 0x79, 0x5c, 0x64, 0x79, 0xb4, 0x4e, 0x15, 0xa4, 0xad, - 0xb4, 0xd9, 0x9b, 0x1e, 0xee, 0x3f, 0xac, 0x0c, 0xf7, 0xcf, 0xf3, 0xf7, 0xeb, 0xca, 0xed, 0x57, - 0xc6, 0xfb, 0xbf, 0x0a, 0x6a, 0xbd, 0x3a, 0xc6, 0x37, 0x30, 0xe0, 0xdd, 0xc5, 0x01, 0xff, 0xf5, - 0xff, 0x08, 0x30, 0xcf, 0x88, 0xff, 0x4d, 
0x41, 0x6f, 0xad, 0x99, 0x64, 0xf8, 0x3d, 0x54, 0x08, - 0x7c, 0x27, 0x9b, 0xc8, 0xe5, 0x28, 0xd4, 0x0b, 0x8f, 0xc8, 0x3e, 0x89, 0x31, 0x4c, 0x51, 0x59, - 0xa4, 0x4b, 0x21, 0x0b, 0xff, 0x4e, 0xfe, 0x3b, 0x2e, 0x6f, 0x13, 0xab, 0x16, 0x85, 0x7a, 0x79, - 0x82, 0x4e, 0x74, 0x71, 0x1b, 0x55, 0x6c, 0x6a, 0x05, 0x5e, 0xd7, 0x49, 0xd7, 0x46, 0xdd, 0xaa, - 0xc7, 0xe9, 0xda, 0xbb, 0x9b, 0x62, 0x64, 0x6a, 0xb5, 0x0e, 0x4f, 0x4e, 0xb5, 0x8d, 0x17, 0xa7, - 0xda, 0xc6, 0xcb, 0x53, 0x6d, 0xe3, 0xe7, 0x48, 0x53, 0x4e, 0x22, 0x4d, 0x79, 0x11, 0x69, 0xca, - 0xcb, 0x48, 0x53, 0xfe, 0x8c, 0x34, 0xe5, 0x97, 0xbf, 0xb4, 0x8d, 0xef, 0xda, 0x79, 0xff, 0xc6, - 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xc9, 0x34, 0x4c, 0x0a, 0x0e, 0x00, 0x00, + // 1041 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x73, 0xdb, 0xc4, + 0x1b, 0x8e, 0xe2, 0xf8, 0x17, 0x67, 0xed, 0x24, 0xcd, 0xfe, 0x80, 0x88, 0xd0, 0xb1, 0x3c, 0x3e, + 0x30, 0xbe, 0x20, 0xb5, 0x29, 0x03, 0xa5, 0x0c, 0x87, 0x2a, 0xb4, 0x03, 0x33, 0x49, 0x5a, 0x36, + 0xfd, 0x33, 0x03, 0x65, 0xa6, 0x6b, 0xf9, 0xb5, 0xbd, 0x58, 0xd2, 0x7a, 0xb4, 0xab, 0xb4, 0x19, + 0x2e, 0x7c, 0x04, 0xbe, 0x02, 0x1f, 0x84, 0x03, 0xb7, 0x1c, 0x7b, 0xec, 0x05, 0x0d, 0x11, 0x67, + 0x0e, 0x5c, 0x73, 0x62, 0xb4, 0x52, 0x6c, 0xcb, 0x76, 0x5a, 0x11, 0x66, 0x72, 0xca, 0xcd, 0xfb, + 0xbc, 0xfb, 0xbe, 0xcf, 0x3e, 0xab, 0x77, 0xdf, 0x67, 0x8c, 0xbe, 0x19, 0xdc, 0x16, 0x26, 0xe3, + 0xd6, 0x20, 0x6c, 0x43, 0xe0, 0x83, 0x04, 0x61, 0x1d, 0x82, 0xdf, 0xe1, 0x81, 0x95, 0x05, 0xe8, + 0x90, 0x59, 0xb4, 0xe3, 0x31, 0x21, 0x18, 0xf7, 0x03, 0xe8, 0x31, 0x21, 0x03, 0x2a, 0x19, 0xf7, + 0xad, 0xc3, 0x9b, 0x6d, 0x90, 0xf4, 0xa6, 0xd5, 0x03, 0x1f, 0x02, 0x2a, 0xa1, 0x63, 0x0e, 0x03, + 0x2e, 0x39, 0x6e, 0xa5, 0x99, 0x26, 0x1d, 0x32, 0x73, 0x6e, 0xa6, 0x99, 0x65, 0x6e, 0x7d, 0xd4, + 0x63, 0xb2, 0x1f, 0xb6, 0x4d, 0x87, 0x7b, 0x56, 0x8f, 0xf7, 0xb8, 0xa5, 0x0a, 0xb4, 0xc3, 0xae, + 0x5a, 0xa9, 0x85, 0xfa, 0x95, 0x16, 0xde, 0xba, 0x55, 0xe0, 0x48, 0xd3, 0xa7, 0xd9, 0xfa, 0x78, + 0x9c, 0xe4, 0x51, 0xa7, 0xcf, 0x7c, 0x08, 0x8e, 0xac, 0xe1, 0xa0, 0x97, 0x00, 0xc2, 0xf2, 0x40, + 0xd2, 0x79, 0x59, 0xd6, 0x79, 0x59, 0x41, 0xe8, 0x4b, 0xe6, 0xc1, 0x4c, 0xc2, 0x27, 0x6f, 0x4b, + 0x10, 0x4e, 0x1f, 0x3c, 0x3a, 0x9d, 0xd7, 0xec, 0xa2, 0xb5, 0x3d, 0x2a, 0x9d, 0xfe, 0x0e, 0xf7, + 0x3b, 0x2c, 0xd1, 0x80, 0x1b, 0x68, 0xc9, 0xa7, 0x1e, 0xe8, 0x5a, 0x43, 0x6b, 0xad, 0xd8, 0xb5, + 0xe3, 0xc8, 0x58, 0x88, 0x23, 0x63, 0x69, 0x9f, 0x7a, 0x40, 0x54, 0x04, 0x6f, 0x23, 0x04, 0x2f, + 0x87, 0x01, 0x28, 0xfd, 0xfa, 0xa2, 0xda, 0x87, 0xb3, 0x7d, 0xe8, 0xde, 0x28, 0x42, 0x26, 0x76, + 0x35, 0x7f, 0xab, 0xa0, 0xf5, 0xbd, 0x50, 0x52, 0xc9, 0xfc, 0xde, 0x53, 0x68, 0xf7, 0x39, 0x1f, + 0x14, 0x60, 0x7a, 0x81, 0x6a, 0x8e, 0xcb, 0xc0, 0x97, 0x3b, 0xdc, 0xef, 0xb2, 0x9e, 0xe2, 0xaa, + 0x6e, 0x7f, 0x61, 0x16, 0xfd, 0xc2, 0x66, 0x46, 0xb5, 0x33, 0x51, 0xc4, 0x7e, 0x27, 0x23, 0xaa, + 0x4d, 0xa2, 0x24, 0x47, 0x84, 0x9f, 0xa1, 0x72, 0x10, 0xba, 0x20, 0xf4, 0x52, 0xa3, 0xd4, 0xaa, + 0x6e, 0x7f, 0x5a, 0x84, 0xd1, 0x24, 0xa1, 0x0b, 0x4f, 0x99, 0xec, 0x3f, 0x18, 0x42, 0x0a, 0x0a, + 0x7b, 0x35, 0xe3, 0x2a, 0x27, 0x31, 0x41, 0xd2, 0xa2, 0x78, 0x17, 0xad, 0x76, 0x29, 0x73, 0xc3, + 0x00, 0x1e, 0x72, 0x97, 0x39, 0x47, 0xfa, 0x92, 0xba, 0x81, 0x0f, 0xe3, 0xc8, 0x58, 0xbd, 0x3f, + 0x19, 0x38, 0x8d, 0x8c, 0x8d, 0x1c, 0xf0, 0xe8, 0x68, 0x08, 0x24, 0x9f, 0x8c, 0xbf, 0x44, 0x55, + 0x2f, 0xf9, 0x84, 0x59, 0xad, 0x15, 0x55, 0xab, 0x19, 0x47, 0x46, 0x75, 0x6f, 0x0c, 0x9f, 0x46, + 0xc6, 0xfa, 0xc4, 0x52, 0xd5, 
0x99, 0x4c, 0xc3, 0x2f, 0xd1, 0x46, 0x72, 0xe5, 0x62, 0x48, 0x1d, + 0x38, 0x00, 0x17, 0x1c, 0xc9, 0x03, 0xbd, 0xac, 0xee, 0xfb, 0xd6, 0x84, 0xfa, 0x51, 0x73, 0x99, + 0xc3, 0x41, 0x2f, 0x01, 0x84, 0x99, 0xf4, 0x70, 0x22, 0x7f, 0x97, 0xb6, 0xc1, 0x3d, 0x4b, 0xb5, + 0xdf, 0x8d, 0x23, 0x63, 0x63, 0x7f, 0xba, 0x22, 0x99, 0x25, 0xc1, 0x1c, 0xad, 0xf1, 0xf6, 0x0f, + 0xe0, 0xc8, 0x11, 0x6d, 0xf5, 0xe2, 0xb4, 0x38, 0x8e, 0x8c, 0xb5, 0x07, 0xb9, 0x72, 0x64, 0xaa, + 0x7c, 0x72, 0x61, 0x82, 0x75, 0xe0, 0x5e, 0xb7, 0x0b, 0x8e, 0x14, 0xfa, 0xff, 0xc6, 0x17, 0x76, + 0x30, 0x86, 0x93, 0x0b, 0x1b, 0x2f, 0x77, 0x5c, 0x2a, 0x04, 0x99, 0x4c, 0xc3, 0x77, 0xd0, 0x5a, + 0xf2, 0xb0, 0x78, 0x28, 0x0f, 0xc0, 0xe1, 0x7e, 0x47, 0xe8, 0xcb, 0x0d, 0xad, 0x55, 0x4e, 0x4f, + 0xf0, 0x28, 0x17, 0x21, 0x53, 0x3b, 0xf1, 0x63, 0xb4, 0x39, 0xea, 0x22, 0x02, 0x87, 0x0c, 0x5e, + 0x3c, 0x81, 0x20, 0x59, 0x08, 0xbd, 0xd2, 0x28, 0xb5, 0x56, 0xec, 0x0f, 0xe2, 0xc8, 0xd8, 0xbc, + 0x3b, 0x7f, 0x0b, 0x39, 0x2f, 0x17, 0x3f, 0x47, 0x38, 0x00, 0xe6, 0x1f, 0x72, 0x47, 0xb5, 0x5f, + 0xd6, 0x10, 0x48, 0xe9, 0xbb, 0x11, 0x47, 0x06, 0x26, 0x33, 0xd1, 0xd3, 0xc8, 0x78, 0x6f, 0x16, + 0x55, 0xed, 0x31, 0xa7, 0x16, 0xfe, 0x11, 0xad, 0x7b, 0xb9, 0x71, 0x21, 0xf4, 0x9a, 0x7a, 0x21, + 0xb7, 0x8b, 0xbf, 0xc9, 0xfc, 0xbc, 0xb1, 0x37, 0xb3, 0x27, 0xb2, 0x9e, 0xc7, 0x05, 0x99, 0x66, + 0x6a, 0xfe, 0xae, 0xa1, 0xeb, 0x53, 0x33, 0x24, 0x7d, 0xae, 0x61, 0xca, 0x80, 0x9f, 0xa3, 0x4a, + 0xd2, 0x15, 0x1d, 0x2a, 0xa9, 0x1a, 0x2a, 0xd5, 0xed, 0x1b, 0xc5, 0x7a, 0x28, 0x6d, 0x98, 0x3d, + 0x90, 0x74, 0x3c, 0xc8, 0xc6, 0x18, 0x19, 0x55, 0xc5, 0xdf, 0xa1, 0x4a, 0xc6, 0x2c, 0xf4, 0x45, + 0x25, 0xfc, 0xb3, 0x7f, 0x21, 0x3c, 0x7f, 0x76, 0x7b, 0x29, 0xa1, 0x22, 0xa3, 0x82, 0xcd, 0xbf, + 0x34, 0xd4, 0x78, 0x93, 0xbe, 0x5d, 0x26, 0x24, 0x7e, 0x36, 0xa3, 0xd1, 0x2c, 0xf8, 0x4e, 0x98, + 0x48, 0x15, 0x5e, 0xcb, 0x14, 0x56, 0xce, 0x90, 0x09, 0x7d, 0x03, 0x54, 0x66, 0x12, 0xbc, 0x33, + 0x71, 0xf7, 0x2f, 0x2c, 0x2e, 0x77, 0xf0, 0xf1, 0x18, 0xfc, 0x3a, 0x29, 0x4e, 0x52, 0x8e, 0xe6, + 0x2f, 0x1a, 0xba, 0x76, 0x00, 0xc1, 0x21, 0x73, 0x80, 0x40, 0x17, 0x02, 0xf0, 0x1d, 0xc0, 0x16, + 0x5a, 0x19, 0x8d, 0x88, 0xcc, 0x19, 0x36, 0xb2, 0xec, 0x95, 0xd1, 0x38, 0x21, 0xe3, 0x3d, 0x23, + 0x17, 0x59, 0x3c, 0xd7, 0x45, 0xae, 0xa3, 0xa5, 0x21, 0x95, 0x7d, 0xbd, 0xa4, 0x76, 0x54, 0x92, + 0xe8, 0x43, 0x2a, 0xfb, 0x44, 0xa1, 0x2a, 0xca, 0x03, 0xa9, 0x66, 0x70, 0x39, 0x8b, 0xf2, 0x40, + 0x12, 0x85, 0x36, 0x4f, 0x96, 0xd1, 0xc6, 0x13, 0xea, 0xb2, 0xce, 0x95, 0x73, 0x5d, 0x39, 0xd7, + 0xdb, 0x9d, 0x0b, 0x5d, 0x39, 0xd7, 0x85, 0x9c, 0x6b, 0x8e, 0xaf, 0x54, 0x2f, 0xcd, 0x57, 0x4e, + 0x34, 0x54, 0x9f, 0x79, 0xe3, 0x97, 0xed, 0x2c, 0xdf, 0xcf, 0x38, 0xcb, 0xe7, 0xc5, 0xa5, 0xcf, + 0x9c, 0x7e, 0xc6, 0x5b, 0xfe, 0xd6, 0x50, 0xf3, 0xcd, 0x1a, 0x2f, 0xc1, 0x5d, 0xbc, 0xbc, 0xbb, + 0x7c, 0xf5, 0x1f, 0x04, 0x16, 0xf1, 0x97, 0x5f, 0x35, 0xf4, 0xff, 0x39, 0x63, 0x14, 0xbf, 0x8f, + 0x4a, 0x61, 0xe0, 0x66, 0x76, 0xb0, 0x1c, 0x47, 0x46, 0xe9, 0x31, 0xd9, 0x25, 0x09, 0x86, 0x29, + 0x5a, 0x16, 0xa9, 0x23, 0x65, 0xf2, 0xef, 0x14, 0x3f, 0xe3, 0xb4, 0x95, 0xd9, 0xd5, 0x38, 0x32, + 0x96, 0xcf, 0xd0, 0xb3, 0xba, 0xb8, 0x85, 0x2a, 0x0e, 0xb5, 0x43, 0xbf, 0xe3, 0xa6, 0x9e, 0x55, + 0xb3, 0x6b, 0xc9, 0x75, 0xed, 0xdc, 0x4d, 0x31, 0x32, 0x8a, 0xda, 0xfb, 0xc7, 0x27, 0xf5, 0x85, + 0x57, 0x27, 0xf5, 0x85, 0xd7, 0x27, 0xf5, 0x85, 0x9f, 0xe2, 0xba, 0x76, 0x1c, 0xd7, 0xb5, 0x57, + 0x71, 0x5d, 0x7b, 0x1d, 0xd7, 0xb5, 0x3f, 0xe2, 0xba, 0xf6, 0xf3, 0x9f, 0xf5, 0x85, 0x6f, 0x5b, + 0x45, 0xff, 0x28, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 
0x1f, 0xf5, 0x97, 0x1c, 0x6c, 0x0f, 0x00, + 0x00, +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { @@ -369,6 +436,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -626,6 +707,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -871,6 +966,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -920,6 +1028,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1022,6 +1136,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1086,6 +1206,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1095,6 +1226,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + 
"," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1107,6 +1243,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1165,6 +1302,11 @@ func (this *ValidatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1176,6 +1318,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1232,6 +1375,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1616,6 +1873,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2389,6 +2680,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index c7016afbf..cfd759285 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -29,6 +29,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables 
from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1beta1"; +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -177,6 +206,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -356,6 +407,28 @@ message ValidatingWebhook { // Default to `['v1beta1']`. 
// +optional repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 5fdf8e3fa..82ee7df9b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -283,6 +283,28 @@ type ValidatingWebhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -433,6 +455,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. 
An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -531,3 +575,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index c57c5b7fa..2c0a9f011 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
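Aside (not part of the vendored diff): the hunks above add the MatchCondition type and the MatchConditions field to the v1beta1 webhook types. A minimal sketch of how that field might be populated in Go, assuming the vendored k8s.io/api/admissionregistration/v1beta1 package from this bump; the webhook name and the CEL expression are illustrative choices, not values taken from this change.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Illustrative webhook entry using the new MatchConditions field.
	// The expression must evaluate to bool against the documented CEL
	// variables ('object', 'oldObject', 'request', 'authorizer'); the one
	// below is an example, not something mandated by the API.
	ignore := admissionregistrationv1beta1.Ignore
	webhook := admissionregistrationv1beta1.MutatingWebhook{
		Name:          "example.mutator.example.com", // hypothetical name
		FailurePolicy: &ignore,
		MatchConditions: []admissionregistrationv1beta1.MatchCondition{
			{
				// Name is the list-map key used for strategic merge (+listMapKey=name).
				Name:       "exclude-kube-system",
				Expression: "request.namespace != 'kube-system'",
			},
		},
	}
	fmt.Println(webhook.MatchConditions[0].Name)
}

Per the field documentation added above, the behavior is gated by the alpha AdmissionWebhookMatchConditions feature gate, and a FALSE condition skips the webhook while an evaluation error defers to failurePolicy.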
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -90,6 +101,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. 
If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index ced4af19c..9c5299bdf 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -78,6 +94,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -229,6 +250,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 6de934200..3b75fa65b 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ServerStorageVersion = map[string]string{ diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 534b550fe..a7a7e7c54 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -127,6 +127,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -277,6 +278,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -675,6 +677,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -735,7 +738,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 09766c295..15dc3150a 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -199,6 +199,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -259,7 +260,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -379,6 +380,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -638,6 +640,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 509bb11c5..6676da064 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -347,7 +347,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -355,7 +355,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. 
The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 15fb1aa87..245ec30f4 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -47,10 +47,10 @@ message ControllerRevision { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Data is the serialized representation of the state. + // data is the serialized representation of the state. optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. optional int64 revision = 3; } @@ -128,17 +128,18 @@ message DeploymentRollback { // DeploymentSpec is the specification of the desired behavior of the Deployment. message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional optional int32 replicas = 1; - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
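Aside (not part of the vendored diff): several hunks above add the note that the only allowed template.spec.restartPolicy for Deployment, DaemonSet, and StatefulSet pod templates is "Always". A minimal sketch of a Deployment built with the vendored apps/v1 and core/v1 types that satisfies this constraint; the helper name and the image/name values are illustrative, not from this change.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newDeployment shows the documented constraint: the pod template's
// restartPolicy must be "Always" (or left empty, in which case the API
// server defaults it to "Always").
func newDeployment(name, image string) *appsv1.Deployment {
	labels := map[string]string{"app": name}
	replicas := int32(1)
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					// Any value other than "Always" here is rejected by validation
					// for Deployment/DaemonSet/StatefulSet workloads.
					RestartPolicy: corev1.RestartPolicyAlways,
					Containers: []corev1.Container{
						{Name: name, Image: image},
					},
				},
			},
		},
	}
}

func main() {
	d := newDeployment("demo", "nginx:1.25") // hypothetical values
	fmt.Println(d.Spec.Template.Spec.RestartPolicy)
}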
@@ -146,28 +147,28 @@ message DeploymentSpec { // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional optional int32 minReadySeconds = 5; - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional optional int32 revisionHistoryLimit = 6; - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional optional bool paused = 7; // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -178,15 +179,15 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; @@ -198,18 +199,18 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 6; - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. 
The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -276,7 +277,7 @@ message RollingUpdateStatefulSetStrategy { // This is helpful in being able to do a canary based deployment. The default value is 0. optional int32 partition = 1; - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -293,32 +294,32 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional map selector = 2; - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -398,13 +399,13 @@ message StatefulSetOrdinals { // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. 
optional string whenDeleted = 1; - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -475,7 +476,7 @@ message StatefulSetSpec { // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -491,7 +492,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -531,13 +532,13 @@ message StatefulSetStatus { // +optional optional int32 collisionCount = 9; - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 910023090..59ed9c2ac 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -31,21 +31,21 @@ const ( // ScaleSpec describes the attributes of a scale subresource type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. 
The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -68,11 +68,11 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -159,7 +159,7 @@ type RollingUpdateStatefulSetStrategy struct { // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. // This is helpful in being able to do a canary based deployment. The default value is 0. Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -191,12 +191,12 @@ const ( // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. type StatefulSetPersistentVolumeClaimRetentionPolicy struct { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -282,7 +282,7 @@ type StatefulSetSpec struct { // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. 
// Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -298,7 +298,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -338,13 +338,13 @@ type StatefulSetStatus struct { // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -409,17 +409,18 @@ type Deployment struct { // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -427,28 +428,28 @@ type DeploymentSpec struct { // +patchStrategy=retainKeys Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. 
// +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -547,15 +548,15 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` @@ -567,18 +568,18 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. 
// +optional @@ -660,10 +661,10 @@ type ControllerRevision struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data is the serialized representation of the state. + // data is the serialized representation of the state. Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 00d6d1825..a62e9869d 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", + "data": "data is the serialized representation of the state.", + "revision": "revision indicates the revision of the state represented by Data.", } func (ControllerRevision) SwaggerDoc() map[string]string { @@ -96,15 +96,15 @@ func (DeploymentRollback) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", + "replicas": "replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "selector is the label selector for pods. 
Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "paused indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -113,14 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", + "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -159,7 +159,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { var map_RollingUpdateStatefulSetStrategy = map[string]string{ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. 
The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -169,8 +169,8 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -179,7 +179,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the number of observed instances of the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -188,9 +188,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "replicas": "replias is the actual number of observed instances of the scaled object.", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "targetSelector": "targetSelector is the label selector for pods that should match the replicas count. 
This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -239,8 +239,8 @@ func (StatefulSetOrdinals) SwaggerDoc() map[string]string { var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", - "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", - "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "whenDeleted": "whenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "whenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", } func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { @@ -257,9 +257,9 @@ var map_StatefulSetSpec = map[string]string{ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -276,8 +276,8 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", + "conditions": "conditions represent the latest available observations of a statefulset's current state.", + "availableReplicas": "availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index af8c4fe41..ddbe35441 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -131,6 +131,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -282,6 +283,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
@@ -600,7 +602,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -720,6 +722,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -777,7 +780,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index dbe4d23bf..a97ac6fcf 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -43,7 +43,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -250,6 +250,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -307,7 +308,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -429,6 +430,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. 
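The apps/v1beta2 hunks above document two behavioral points picked up in this bump: workload pod templates only accept restartPolicy "Always", and spec.ordinals now sits behind the StatefulSetStartOrdinal feature gate at beta (previously alpha). A minimal sketch of a StatefulSet object that exercises both, assuming a v1.27 cluster with the gate enabled and that StatefulSetOrdinals exposes a Start field as it does in apps/v1; all names and values are illustrative and not taken from this change:

// Sketch only: constructs an apps/v1beta2 StatefulSet whose replica indices
// start at 10 and whose pod template uses the only allowed restart policy.
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// int32Ptr is a local illustrative helper for the *int32 replicas field.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	sts := appsv1beta2.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: appsv1beta2.StatefulSetSpec{
			Replicas:    int32Ptr(3),
			ServiceName: "web",
			Selector:    &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			// Replica indices start at 10 (web-10, web-11, web-12) instead of 0.
			// Requires the StatefulSetStartOrdinal gate, beta as of v1.27.
			Ordinals: &appsv1beta2.StatefulSetOrdinals{Start: 10},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
				Spec: corev1.PodSpec{
					// Per the updated field docs, "Always" is the only value
					// accepted for template.spec.restartPolicy in workloads.
					RestartPolicy: corev1.RestartPolicyAlways,
					Containers: []corev1.Container{{
						Name:  "nginx",
						Image: "nginx:1.25",
					}},
				},
			},
		},
	}
	fmt.Printf("pods will be named %s-%d..%s-%d\n",
		sts.Name, sts.Spec.Ordinals.Start,
		sts.Name, sts.Spec.Ordinals.Start+*sts.Spec.Replicas-1)
}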
@@ -690,6 +692,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 1936a2467..d7e920991 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. 
The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -313,7 +313,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -375,7 +375,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 5d37ac1f8..b1a730b81 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_BoundObjectReference = map[string]string{ diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 3198dce3b..51d925244 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -30,7 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "k8s.io/api/authentication/v1alpha1"; // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types.go b/vendor/k8s.io/api/authentication/v1alpha1/types.go index da65028cd..1ee3612fb 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types.go @@ -25,10 +25,11 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 +// +k8s:prerelease-lifecycle-gen:introduced=1.26 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. type SelfSubjectReview struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go index bc17c5f30..1ffcc99e7 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -24,11 +24,11 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_SelfSubjectReview = map[string]string{ - "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated.", + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "status": "Status is filled in by the server with the user attributes.", } diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go index b86dfbef6..62a70a781 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -24,17 +24,17 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { - return 1, 25 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { - return 1, 28 + return 1, 29 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { - return 1, 31 + return 1, 32 } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 1978dcf6a..7f1d5ca6c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -72,10 +72,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_77c9b20d3ad27844, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_77c9b20d3ad27844, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_77c9b20d3ad27844, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_77c9b20d3ad27844, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,6 +242,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus") proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") @@ -198,49 +256,53 @@ func init() { } var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4e, 0x13, 0x5f, - 0x14, 0x9e, 0xe9, 0x1f, 0xd2, 0xde, 0xfe, 0xfa, 0x13, 0x6f, 0x62, 0xd2, 0x34, 0x71, 0x0a, 0x75, - 0x43, 0x82, 0xdc, 0x11, 0x42, 0x90, 0xe0, 0x8a, 0x51, 0x42, 0x30, 0x21, 0x26, 0x57, 0x70, 0xa1, - 0x2e, 0xbc, 0x9d, 0x1e, 0xa6, 0x63, 0x9d, 0x3f, 0xb9, 0x73, 0xa7, 0xca, 0x8e, 0x47, 0x70, 0xe9, - 0xd2, 0xc4, 0x27, 0x71, 0xc7, 0x92, 0x25, 0x0b, 0xd3, 0xc8, 0xf8, 0x04, 0xbe, 0x81, 0xb9, 0x77, - 0x2e, 0x4c, 0x81, 0x68, 0x61, 0x37, 0xf7, 0x3b, 0xe7, 0xfb, 0xce, 0x39, 0xdf, 0xe9, 0x29, 0x7a, - 0x3e, 0x5c, 0x4f, 0x88, 0x1f, 0xd9, 0xc3, 0xb4, 0x07, 0x3c, 0x04, 0x01, 0x89, 0x3d, 0x82, 0xb0, - 
0x1f, 0x71, 0x5b, 0x07, 0x58, 0xec, 0xdb, 0x2c, 0x15, 0x03, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, - 0xa1, 0x3d, 0x5a, 0xee, 0x81, 0x60, 0xcb, 0xb6, 0x07, 0x21, 0x70, 0x26, 0xa0, 0x4f, 0x62, 0x1e, - 0x89, 0x08, 0xcf, 0xe7, 0x14, 0xc2, 0x62, 0x9f, 0x5c, 0xa6, 0x10, 0x4d, 0x69, 0x2f, 0x79, 0xbe, - 0x18, 0xa4, 0x3d, 0xe2, 0x46, 0x81, 0xed, 0x45, 0x5e, 0x64, 0x2b, 0x66, 0x2f, 0x3d, 0x50, 0x2f, - 0xf5, 0x50, 0x5f, 0xb9, 0x62, 0x7b, 0xb5, 0x68, 0x22, 0x60, 0xee, 0xc0, 0x0f, 0x81, 0x1f, 0xda, - 0xf1, 0xd0, 0x93, 0x40, 0x62, 0x07, 0x20, 0x98, 0x3d, 0xba, 0xd6, 0x47, 0xdb, 0xfe, 0x1b, 0x8b, - 0xa7, 0xa1, 0xf0, 0x03, 0xb8, 0x46, 0x58, 0x9b, 0x46, 0x48, 0xdc, 0x01, 0x04, 0xec, 0x2a, 0xaf, - 0xfb, 0x18, 0xa1, 0xad, 0x4f, 0x82, 0xb3, 0x57, 0xec, 0x43, 0x0a, 0xb8, 0x83, 0xaa, 0xbe, 0x80, - 0x20, 0x69, 0x99, 0x73, 0xe5, 0x85, 0xba, 0x53, 0xcf, 0xc6, 0x9d, 0xea, 0x8e, 0x04, 0x68, 0x8e, - 0x6f, 0xd4, 0xbe, 0x7c, 0xed, 0x18, 0x47, 0x3f, 0xe6, 0x8c, 0xee, 0xb7, 0x12, 0x6a, 0xec, 0x45, - 0x43, 0x08, 0x29, 0x8c, 0x7c, 0xf8, 0x88, 0xdf, 0xa1, 0x9a, 0x1c, 0xa6, 0xcf, 0x04, 0x6b, 0x99, - 0x73, 0xe6, 0x42, 0x63, 0xe5, 0x11, 0x29, 0xcc, 0xbc, 0xe8, 0x89, 0xc4, 0x43, 0x4f, 0x02, 0x09, - 0x91, 0xd9, 0x64, 0xb4, 0x4c, 0x5e, 0xf4, 0xde, 0x83, 0x2b, 0x76, 0x41, 0x30, 0x07, 0x1f, 0x8f, - 0x3b, 0x46, 0x36, 0xee, 0xa0, 0x02, 0xa3, 0x17, 0xaa, 0x78, 0x0f, 0x55, 0x92, 0x18, 0xdc, 0x56, - 0x49, 0xa9, 0xaf, 0x90, 0xa9, 0xab, 0x22, 0x13, 0xfd, 0xbd, 0x8c, 0xc1, 0x75, 0xfe, 0xd3, 0xfa, - 0x15, 0xf9, 0xa2, 0x4a, 0x0d, 0xbf, 0x45, 0x33, 0x89, 0x60, 0x22, 0x4d, 0x5a, 0x65, 0xa5, 0xbb, - 0x7a, 0x4b, 0x5d, 0xc5, 0x75, 0xfe, 0xd7, 0xca, 0x33, 0xf9, 0x9b, 0x6a, 0xcd, 0xae, 0x8b, 0xee, - 0x5c, 0x69, 0x02, 0x3f, 0x40, 0x55, 0x21, 0x21, 0xe5, 0x52, 0xdd, 0x69, 0x6a, 0x66, 0x35, 0xcf, - 0xcb, 0x63, 0x78, 0x11, 0xd5, 0x59, 0xda, 0xf7, 0x21, 0x74, 0x21, 0x69, 0x95, 0xd4, 0x32, 0x9a, - 0xd9, 0xb8, 0x53, 0xdf, 0x3c, 0x07, 0x69, 0x11, 0xef, 0xfe, 0x36, 0xd1, 0xdd, 0x6b, 0x2d, 0xe1, - 0x27, 0xa8, 0x39, 0xd1, 0x3e, 0xf4, 0x55, 0xbd, 0x9a, 0x73, 0x4f, 0xd7, 0x6b, 0x6e, 0x4e, 0x06, - 0xe9, 0xe5, 0x5c, 0xbc, 0x8b, 0x2a, 0x69, 0x02, 0x5c, 0x7b, 0xbd, 0x78, 0x03, 0x4f, 0xf6, 0x13, - 0xe0, 0x3b, 0xe1, 0x41, 0x54, 0x98, 0x2c, 0x11, 0xaa, 0x64, 0x2e, 0x8f, 0x53, 0xf9, 0xf7, 0x38, - 0xd2, 0x20, 0xe0, 0x3c, 0xe2, 0x6a, 0x21, 0x13, 0x06, 0x6d, 0x49, 0x90, 0xe6, 0xb1, 0xee, 0xf7, - 0x12, 0xaa, 0x9d, 0x97, 0xc4, 0x0f, 0x51, 0x4d, 0x96, 0x09, 0x59, 0x00, 0xda, 0xd5, 0x59, 0x4d, - 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x3e, 0x2a, 0xa7, 0x7e, 0x5f, 0x8d, 0x56, 0x77, 0x1a, - 0x3a, 0xb1, 0xbc, 0xbf, 0xf3, 0x8c, 0x4a, 0x1c, 0x77, 0xd1, 0x8c, 0xc7, 0xa3, 0x34, 0x96, 0x3f, - 0x08, 0xd9, 0x28, 0x92, 0x6b, 0xdd, 0x56, 0x08, 0xd5, 0x11, 0xfc, 0x06, 0x55, 0x41, 0x5e, 0x8d, - 0x9a, 0xa5, 0xb1, 0xb2, 0x76, 0x0b, 0x7f, 0x88, 0x3a, 0xb7, 0xad, 0x50, 0xf0, 0xc3, 0x89, 0xd1, - 0x24, 0x46, 0x73, 0xcd, 0xb6, 0xa7, 0x4f, 0x52, 0xe5, 0xe0, 0x59, 0x54, 0x1e, 0xc2, 0x61, 0x3e, - 0x16, 0x95, 0x9f, 0xf8, 0x29, 0xaa, 0x8e, 0xe4, 0xb5, 0xea, 0xe5, 0x2c, 0xdd, 0xa0, 0x78, 0x71, - 0xe2, 0x34, 0xe7, 0x6e, 0x94, 0xd6, 0x4d, 0x67, 0xfb, 0xf8, 0xcc, 0x32, 0x4e, 0xce, 0x2c, 0xe3, - 0xf4, 0xcc, 0x32, 0x8e, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x24, 0xb3, 0xcc, 0xd3, 0xcc, 0x32, - 0x7f, 0x66, 0x96, 0xf9, 0xf9, 0x97, 0x65, 0xbc, 0x9e, 0x9f, 0xfa, 0x2f, 0xfa, 0x27, 0x00, 0x00, - 0xff, 0xff, 0xb8, 0x72, 0x2c, 0x2c, 0x82, 0x05, 0x00, 0x00, + // 725 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 
0xf6, 0x0f, 0x69, 0xa7, 0x56, 0x61, 0x12, 0x23, 0x69, 0xe2, 0x16, 0x6a, 0x62, 0x48, + 0x80, 0x59, 0x21, 0x04, 0x09, 0x9e, 0x58, 0x25, 0x04, 0x13, 0x62, 0x32, 0x05, 0x0f, 0xea, 0xc1, + 0xe9, 0xf6, 0xb1, 0x5d, 0x4b, 0x77, 0x37, 0xbb, 0xb3, 0x55, 0x6e, 0x7c, 0x04, 0x8f, 0x1e, 0x4d, + 0xfc, 0x24, 0xde, 0x38, 0x72, 0xc4, 0xc4, 0x34, 0xb2, 0x7e, 0x02, 0xbf, 0x81, 0x99, 0xd9, 0x61, + 0xdb, 0x82, 0x14, 0xb8, 0x78, 0xdb, 0xf9, 0xcd, 0xfb, 0xfd, 0xde, 0x7b, 0xbf, 0xf7, 0x32, 0x8b, + 0x5e, 0x76, 0xd6, 0x42, 0xe2, 0x78, 0x46, 0x27, 0x6a, 0x42, 0xe0, 0x02, 0x87, 0xd0, 0xe8, 0x81, + 0xdb, 0xf2, 0x02, 0x43, 0x5d, 0x30, 0xdf, 0x31, 0x58, 0xc4, 0xdb, 0xe0, 0x72, 0xc7, 0x62, 0xdc, + 0xf1, 0x5c, 0xa3, 0xb7, 0xd4, 0x04, 0xce, 0x96, 0x0c, 0x1b, 0x5c, 0x08, 0x18, 0x87, 0x16, 0xf1, + 0x03, 0x8f, 0x7b, 0x78, 0x36, 0xa1, 0x10, 0xe6, 0x3b, 0x64, 0x94, 0x42, 0x14, 0xa5, 0xba, 0x68, + 0x3b, 0xbc, 0x1d, 0x35, 0x89, 0xe5, 0x75, 0x0d, 0xdb, 0xb3, 0x3d, 0x43, 0x32, 0x9b, 0xd1, 0xbe, + 0x3c, 0xc9, 0x83, 0xfc, 0x4a, 0x14, 0xab, 0x0b, 0xe3, 0x8a, 0xb8, 0x98, 0xbf, 0xba, 0x32, 0x88, + 0xee, 0x32, 0xab, 0xed, 0xb8, 0x10, 0x1c, 0x1a, 0x7e, 0xc7, 0x16, 0x40, 0x68, 0x74, 0x81, 0xb3, + 0x7f, 0xb1, 0x8c, 0xab, 0x58, 0x41, 0xe4, 0x72, 0xa7, 0x0b, 0x97, 0x08, 0xab, 0xd7, 0x11, 0x42, + 0xab, 0x0d, 0x5d, 0x76, 0x91, 0x57, 0x7f, 0x8a, 0xd0, 0xe6, 0x27, 0x1e, 0xb0, 0xd7, 0xec, 0x20, + 0x02, 0x5c, 0x43, 0x05, 0x87, 0x43, 0x37, 0x9c, 0xd6, 0x66, 0x72, 0x73, 0x25, 0xb3, 0x14, 0xf7, + 0x6b, 0x85, 0x6d, 0x01, 0xd0, 0x04, 0x5f, 0x2f, 0x7e, 0xf9, 0x5a, 0xcb, 0x1c, 0xfd, 0x9c, 0xc9, + 0xd4, 0x7f, 0x68, 0x68, 0xaa, 0x01, 0x07, 0xfb, 0x8d, 0xa8, 0xf9, 0x01, 0x2c, 0x4e, 0xa1, 0xe7, + 0xc0, 0x47, 0xfc, 0x1e, 0x15, 0x45, 0x4b, 0x2d, 0xc6, 0xd9, 0xb4, 0x36, 0xa3, 0xcd, 0x95, 0x97, + 0x9f, 0x90, 0xc1, 0x00, 0xd2, 0xca, 0x88, 0xdf, 0xb1, 0x05, 0x10, 0x12, 0x11, 0x4d, 0x7a, 0x4b, + 0xe4, 0x95, 0x54, 0xd9, 0x01, 0xce, 0x4c, 0x7c, 0xdc, 0xaf, 0x65, 0xe2, 0x7e, 0x0d, 0x0d, 0x30, + 0x9a, 0xaa, 0xe2, 0x26, 0x9a, 0x08, 0x39, 0xe3, 0x51, 0x38, 0x9d, 0x95, 0xfa, 0xeb, 0xe4, 0xda, + 0x01, 0x93, 0x4b, 0x75, 0x36, 0xa4, 0x82, 0x79, 0x57, 0x65, 0x9a, 0x48, 0xce, 0x54, 0x29, 0xd7, + 0x3d, 0xf4, 0xe0, 0x0a, 0x0a, 0xde, 0x45, 0xc5, 0x28, 0x84, 0x60, 0xdb, 0xdd, 0xf7, 0x54, 0x83, + 0x8f, 0xc7, 0x16, 0x40, 0xf6, 0x54, 0xb4, 0x39, 0xa9, 0x92, 0x15, 0xcf, 0x11, 0x9a, 0x2a, 0xd5, + 0xbf, 0x65, 0x51, 0x79, 0xd7, 0xeb, 0x80, 0xfb, 0xdf, 0x6c, 0xdc, 0x45, 0xf9, 0xd0, 0x07, 0x4b, + 0x99, 0xb8, 0x7c, 0x03, 0x13, 0x87, 0xea, 0x6b, 0xf8, 0x60, 0x99, 0x77, 0x94, 0x7e, 0x5e, 0x9c, + 0xa8, 0x54, 0xc3, 0xef, 0xd2, 0xe1, 0xe4, 0xa4, 0xee, 0xca, 0x2d, 0x75, 0xc7, 0x8f, 0xc5, 0x42, + 0xf7, 0x2e, 0x14, 0x81, 0x1f, 0xa1, 0x02, 0x17, 0x90, 0x74, 0xa9, 0x64, 0x56, 0x14, 0xb3, 0x90, + 0xc4, 0x25, 0x77, 0x78, 0x1e, 0x95, 0x58, 0xd4, 0x72, 0xc0, 0xb5, 0x40, 0x6c, 0x8d, 0xd8, 0xec, + 0x4a, 0xdc, 0xaf, 0x95, 0x36, 0xce, 0x41, 0x3a, 0xb8, 0xaf, 0xff, 0xd1, 0xd0, 0xd4, 0xa5, 0x92, + 0xf0, 0x33, 0x54, 0x19, 0x2a, 0x1f, 0x5a, 0x32, 0x5f, 0xd1, 0xbc, 0xaf, 0xf2, 0x55, 0x36, 0x86, + 0x2f, 0xe9, 0x68, 0x2c, 0xde, 0x41, 0x79, 0x31, 0x69, 0xe5, 0xf5, 0xfc, 0x0d, 0x3c, 0x49, 0x97, + 0x26, 0x35, 0x59, 0x20, 0x54, 0xca, 0x8c, 0xb6, 0x93, 0x1f, 0xdf, 0x8e, 0x30, 0x08, 0x82, 0xc0, + 0x0b, 0xe4, 0x40, 0x86, 0x0c, 0xda, 0x14, 0x20, 0x4d, 0xee, 0xea, 0xdf, 0xb3, 0x28, 0xdd, 0x4a, + 0xbc, 0x90, 0x6c, 0xb8, 0xcb, 0xba, 0xa0, 0x5c, 0x1d, 0xd9, 0x5c, 0x81, 0xd3, 0x34, 0x02, 0x3f, + 0x44, 0xb9, 0xc8, 0x69, 0xc9, 0xd6, 0x4a, 0x66, 0x59, 0x05, 0xe6, 0xf6, 0xb6, 0x5f, 0x50, 0x81, + 0xe3, 0x3a, 0x9a, 0xb0, 0x03, 0x2f, 
0xf2, 0xc5, 0x42, 0x88, 0x42, 0x91, 0x18, 0xeb, 0x96, 0x44, + 0xa8, 0xba, 0xc1, 0x6f, 0x51, 0x01, 0xc4, 0x13, 0x24, 0x7b, 0x29, 0x2f, 0xaf, 0xde, 0xc2, 0x1f, + 0x22, 0xdf, 0xae, 0x4d, 0x97, 0x07, 0x87, 0x43, 0xad, 0x09, 0x8c, 0x26, 0x9a, 0x55, 0x5b, 0xbd, + 0x6f, 0x32, 0x06, 0x4f, 0xa2, 0x5c, 0x07, 0x0e, 0x93, 0xb6, 0xa8, 0xf8, 0xc4, 0xcf, 0x51, 0xa1, + 0x27, 0x9e, 0x3e, 0x35, 0x9c, 0xc5, 0x1b, 0x24, 0x1f, 0xbc, 0x97, 0x34, 0xe1, 0xae, 0x67, 0xd7, + 0x34, 0x73, 0xeb, 0xf8, 0x4c, 0xcf, 0x9c, 0x9c, 0xe9, 0x99, 0xd3, 0x33, 0x3d, 0x73, 0x14, 0xeb, + 0xda, 0x71, 0xac, 0x6b, 0x27, 0xb1, 0xae, 0x9d, 0xc6, 0xba, 0xf6, 0x2b, 0xd6, 0xb5, 0xcf, 0xbf, + 0xf5, 0xcc, 0x9b, 0xd9, 0x6b, 0x7f, 0x60, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x19, 0x49, + 0x3f, 0xfd, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -275,6 +337,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -517,6 +655,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenReview) Size() (n int) { if m == nil { return 0 @@ -603,6 +765,27 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenReview) String() string { if this == nil { return "nil" @@ -752,6 +935,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index d1847a02e..53b4635d7 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authentication.v1beta1; +import "k8s.io/api/authentication/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -37,6 +38,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. 
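The SelfSubjectReview message introduced above for authentication/v1beta1 is create-only and cluster-scoped: the client sends an empty object and the server fills in Status with the attributes it resolved for the caller. A minimal sketch of how a caller might exercise it, assuming the client-go v0.27 clientset pulled in by this bump exposes AuthenticationV1beta1().SelfSubjectReviews(); the kubeconfig handling is illustrative:

// Sketch only: asks the API server "who am I?" via the beta SelfSubjectReview
// endpoint and prints the resolved user info.
package main

import (
	"context"
	"fmt"

	authnv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig loading; adjust to your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Only "create" is served for this resource; Status comes back populated.
	ssr, err := cs.AuthenticationV1beta1().SelfSubjectReviews().Create(
		context.TODO(), &authnv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("username=%s groups=%v extra=%v\n",
		ssr.Status.UserInfo.Username, ssr.Status.UserInfo.Groups, ssr.Status.UserInfo.Extra)
}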
diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index ed23e50f7..075ee1263 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -44,6 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, &TokenReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 08e1e09b6..5bce82e7c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -113,3 +114,29 @@ type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c3..d6644f2cf 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,28 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 059ec1a86..99ffadf7b 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -45,6 +45,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index e448106e4..904796925 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7a..93229485c 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189e..e0846be7a 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 8cf997a75..1dbafd1a5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -87,13 +87,13 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -147,11 +147,11 @@ message HorizontalPodAutoscaler { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional optional HorizontalPodAutoscalerStatus status = 3; } @@ -186,7 +186,7 @@ message HorizontalPodAutoscalerList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; } @@ -204,10 +204,10 @@ message HorizontalPodAutoscalerSpec { // +optional optional int32 minReplicas = 2; - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. optional int32 maxReplicas = 3; - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional optional int32 targetCPUUtilizationPercentage = 4; @@ -215,22 +215,22 @@ message HorizontalPodAutoscalerSpec { // current status of a horizontal pod autoscaler message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional optional int64 observedGeneration = 1; - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. 
// +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. optional int32 desiredReplicas = 4; - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional optional int32 currentCPUUtilizationPercentage = 5; @@ -264,7 +264,7 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -309,7 +309,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -464,31 +464,31 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource. message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. 
- // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional optional string selector = 2; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 6397430a2..450829017 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -25,11 +25,13 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -46,9 +48,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` @@ -56,22 +60,22 @@ type HorizontalPodAutoscalerSpec struct { // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - // desired number of replicas of pods managed by this autoscaler. 
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` @@ -87,11 +91,11 @@ type HorizontalPodAutoscaler struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -105,7 +109,7 @@ type HorizontalPodAutoscalerList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -118,31 +122,31 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. 
This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } @@ -194,11 +198,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -206,7 +212,8 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -214,6 +221,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -231,6 +239,7 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` @@ -239,6 +248,7 @@ type ObjectMetricSource struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -252,6 +262,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` @@ -273,11 +284,13 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -295,16 +308,19 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. Container string `json:"container" protobuf:"bytes,5,opt,name=container"` } @@ -315,14 +331,17 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional @@ -341,11 +360,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -353,13 +374,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -390,15 +413,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -413,6 +440,7 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` @@ -421,6 +449,7 @@ type ObjectMetricStatus struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -432,6 +461,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` @@ -451,6 +481,7 @@ type PodsMetricStatus struct { type ResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -458,6 +489,7 @@ type ResourceMetricStatus struct { // specification. 
// +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -473,6 +505,7 @@ type ResourceMetricStatus struct { type ContainerResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -480,11 +513,13 @@ type ContainerResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget Container string `json:"container" protobuf:"bytes,4,opt,name=container"` } @@ -495,12 +530,14 @@ type ExternalMetricStatus struct { // metricName is the name of a metric used for autoscaling in // metric system. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` // currentValue is the current value of the metric (as a quantity) CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index ca288e912..37c2b36a5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_ContainerResourceMetricSource = map[string]string{ @@ -53,9 +53,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -89,8 +89,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", + "spec": "spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", + "items": "items is the list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -124,8 +124,8 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "maxReplicas": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -134,11 +134,11 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerStatus = map[string]string{ "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ var map_MetricSpec = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -165,7 +165,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -246,8 +246,8 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -256,7 +256,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -265,8 +265,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "replicas": "replicas is the actual number of observed instances of the scaled object.", + "selector": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index c08328023..a9e36975f 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -495,7 +495,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 9b2dc36e3..c12a83df1 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -59,9 +59,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -83,11 +85,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -105,11 +109,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -117,6 +123,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -125,6 +132,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -144,6 +152,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -171,7 +180,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -179,10 +188,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value Max is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +listType=atomic @@ -203,12 +214,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -249,8 +262,10 @@ const ( type ObjectMetricSource struct { // describedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +277,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +292,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +307,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +321,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +330,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +342,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -405,15 +429,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -432,11 +460,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). 
Such metrics are built in to @@ -444,6 +474,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -451,6 +482,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -465,8 +497,10 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` } @@ -476,6 +510,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -486,8 +521,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -498,11 +534,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -511,6 +549,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -520,10 +559,12 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 41ab32a4c..1941b1ef5 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -288,7 +288,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 33d27a962..6b3d41521 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -89,7 +89,7 @@ message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; // API version of the referent diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index c1480ab39..842284072 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -26,7 +26,7 @@ import ( type CrossVersionObjectReference struct { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 6f555487d..d656ee416 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -54,7 +54,7 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "apiVersion": "API version of the referent", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 1bafbf6c7..5b2fe9442 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -361,7 +361,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -411,7 +411,7 @@ message MetricValueStatus { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; - // currentAverageUtilization is the current value of the average of the + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional @@ -485,7 +485,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 60da3ba04..b0b7681c0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -62,9 +62,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -85,11 +87,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -107,11 +111,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -119,6 +125,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -127,6 +134,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -146,6 +154,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -173,7 +182,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -181,10 +190,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional @@ -204,12 +215,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -251,6 +264,7 @@ type ObjectMetricSource struct { DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +276,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +291,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +306,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +320,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +329,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +341,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -399,15 +422,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -426,6 +453,7 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. @@ -438,13 +466,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -459,6 +489,7 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` @@ -470,6 +501,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -480,8 +512,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -492,11 +525,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -505,6 +540,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -514,11 +550,13 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). 
// +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the + + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index cb92e9e34..4af7d0ec0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type 
is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -203,7 +203,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -227,7 +227,7 @@ var map_MetricValueStatus = map[string]string{ "": "MetricValueStatus holds the current value for a metric", "value": "value is the current value of the metric (as a quantity).", "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", } func (MetricValueStatus) SwaggerDoc() map[string]string { @@ -286,7 +286,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 74ccac921..df4381c73 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -72,7 +72,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -83,6 +82,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -189,7 +189,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. 
Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -213,8 +213,8 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; @@ -243,6 +243,7 @@ message JobSpec { optional bool manualSelector = 5; // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ optional k8s.io.api.core.v1.PodTemplateSpec template = 6; @@ -256,7 +257,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -281,7 +282,7 @@ message JobSpec { // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -334,7 +335,7 @@ message JobStatus { // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -344,15 +345,16 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. 
// // Old jobs might not be tracked using this field, in which case the field @@ -409,6 +411,7 @@ message PodFailurePolicyOnExitCodesRequirement { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -442,10 +445,11 @@ message PodFailurePolicyOnPodConditionsPattern { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. message PodFailurePolicyRule { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -470,12 +474,12 @@ message PodFailurePolicyRule { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional repeated string failed = 2; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index dcb15728f..22cf9ee9c 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -23,8 +23,11 @@ import ( ) const ( - JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index" + // All Kubernetes labels need to be prefixed with Kubernetes to distinguish them from end-user labels + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions + labelPrefix = "batch.kubernetes.io/" + JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from // being deleted before being accounted in the Job status. // @@ -34,7 +37,14 @@ const ( // 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the // apiserver and job controller will ignore this annotation and they will // always track jobs using finalizers. - JobTrackingFinalizer = "batch.kubernetes.io/job-tracking" + JobTrackingFinalizer = labelPrefix + "job-tracking" + // The Job labels will use batch.kubernetes.io as a prefix for all labels + // Historically the job controller uses unprefixed labels for job-name and controller-uid and + // Kubernetes continutes to recognize those unprefixed labels for consistency. + JobNameLabel = labelPrefix + "job-name" + // ControllerUid is used to programatically get pods corresponding to a Job. + // There is a corresponding label without the batch.kubernetes.io that we support for legacy reasons. 
+ ControllerUidLabel = labelPrefix + "controller-uid" ) // +genclient @@ -135,6 +145,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -168,10 +179,11 @@ type PodFailurePolicyOnPodConditionsPattern struct { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. type PodFailurePolicyRule struct { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -216,7 +228,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -240,8 +252,8 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` @@ -275,6 +287,7 @@ type JobSpec struct { ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` @@ -288,7 +301,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -313,7 +326,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. 
If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -366,7 +379,7 @@ type JobStatus struct { // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -376,15 +389,16 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -403,12 +417,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -513,7 +527,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -524,6 +537,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. 
// Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 89470dcc6..f6f3141f1 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. 
Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", @@ -113,16 +113,16 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the `JobPodFailurePolicy` feature gate (disabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. 
If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. 
If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -137,8 +137,8 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. 
Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -168,7 +168,7 @@ func (PodFailurePolicy) SwaggerDoc() map[string]string { var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template.", - "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", "values": "Specifies the set of values. 
Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", } @@ -187,8 +187,8 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { } var map_PodFailurePolicyRule = map[string]string{ - "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of OnExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. 
At most 20 elements are allowed.", } @@ -199,8 +199,8 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index d042fc695..03feb2cea 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -157,38 +157,10 @@ func (m *CronJobStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} + return fileDescriptor_e57b277b05179ae7, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +190,6 @@ func init() { proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } @@ -227,58 +198,57 @@ func init() { } var fileDescriptor_e57b277b05179ae7 = []byte{ - // 814 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x4e, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, - 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x22, - 0xa4, 0xaa, 0x42, 0x9d, 0x1d, 0xbf, 0x38, 0xd3, 0x78, 0x77, 0x56, 0x3b, 0xb3, 0x91, 0x72, 0xe3, - 0xc2, 0x9d, 0xef, 0xc2, 0x9d, 0x73, 0x8e, 0xbd, 0xd1, 0xd3, 0x8a, 0x2c, 0xdf, 0x82, 0x13, 0x9a, - 0xf1, 0x7a, 0xed, 0xda, 0xeb, 0xa6, 0xbd, 0xf4, 0xe6, 0x79, 0xf3, 0xff, 0xff, 0xe6, 0xed, 0x7b, - 0x6f, 0x67, 0x8d, 0xee, 0x9d, 0x7e, 0xad, 0x88, 0x90, 0xfe, 0x69, 0x16, 0x42, 0x1a, 0x83, 0x06, - 0xe5, 0x9f, 0x41, 0x3c, 0x92, 0xa9, 0x5f, 0x6e, 0xb0, 0x44, 0xf8, 0x21, 0xd3, 0xfc, 0xc4, 0x3f, - 0xdb, 0x0f, 0x41, 0xb3, 0x7d, 0x7f, 0x0c, 0x31, 0xa4, 0x4c, 0xc3, 0x88, 0x24, 0xa9, 
0xd4, 0x12, - 0xbb, 0x53, 0x25, 0x61, 0x89, 0x20, 0x56, 0x49, 0x4a, 0x65, 0xf7, 0xf3, 0xb1, 0xd0, 0x27, 0x59, - 0x48, 0xb8, 0x8c, 0xfc, 0xb1, 0x1c, 0x4b, 0xdf, 0x1a, 0xc2, 0xec, 0xd8, 0xae, 0xec, 0xc2, 0xfe, - 0x9a, 0x82, 0xba, 0xb7, 0x6b, 0x8e, 0x5c, 0x3e, 0xad, 0xdb, 0x5f, 0x10, 0x71, 0x99, 0x42, 0x9d, - 0xe6, 0xcb, 0xb9, 0x26, 0x62, 0xfc, 0x44, 0xc4, 0x90, 0x9e, 0xfb, 0xc9, 0xe9, 0xd8, 0x04, 0x94, - 0x1f, 0x81, 0x66, 0x75, 0x2e, 0x7f, 0x9d, 0x2b, 0xcd, 0x62, 0x2d, 0x22, 0x58, 0x31, 0x7c, 0x75, - 0x95, 0x41, 0xf1, 0x13, 0x88, 0xd8, 0xb2, 0xaf, 0xff, 0x7b, 0x13, 0x6d, 0xdf, 0x4f, 0x65, 0x7c, - 0x28, 0x43, 0xfc, 0x14, 0xb5, 0x4d, 0x3e, 0x23, 0xa6, 0x99, 0xeb, 0xec, 0x39, 0x83, 0xce, 0xc1, - 0x17, 0x64, 0x5e, 0xcf, 0x0a, 0x4b, 0x92, 0xd3, 0xb1, 0x09, 0x28, 0x62, 0xd4, 0xe4, 0x6c, 0x9f, - 0x3c, 0x0c, 0x9f, 0x01, 0xd7, 0x3f, 0x82, 0x66, 0x01, 0xbe, 0xc8, 0x7b, 0x8d, 0x22, 0xef, 0xa1, - 0x79, 0x8c, 0x56, 0x54, 0xfc, 0x2d, 0xda, 0x54, 0x09, 0x70, 0xb7, 0x69, 0xe9, 0x77, 0xc8, 0xba, - 0x6e, 0x91, 0x32, 0xa5, 0x61, 0x02, 0x3c, 0xb8, 0x5e, 0x22, 0x37, 0xcd, 0x8a, 0x5a, 0x00, 0x7e, - 0x88, 0xb6, 0x94, 0x66, 0x3a, 0x53, 0xee, 0x86, 0x45, 0x7d, 0x7c, 0x35, 0xca, 0xca, 0x83, 0x77, - 0x4a, 0xd8, 0xd6, 0x74, 0x4d, 0x4b, 0x4c, 0xff, 0x4f, 0x07, 0x75, 0x4a, 0xe5, 0x91, 0x50, 0x1a, - 0x3f, 0x59, 0xa9, 0x05, 0x79, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x4e, 0x79, 0x52, 0x7b, 0x16, - 0x59, 0xa8, 0xc3, 0x03, 0xd4, 0x12, 0x1a, 0x22, 0xe5, 0x36, 0xf7, 0x36, 0x06, 0x9d, 0x83, 0x8f, - 0xae, 0xcc, 0x3e, 0xb8, 0x51, 0xd2, 0x5a, 0xdf, 0x1b, 0x1f, 0x9d, 0xda, 0xfb, 0x7f, 0x6f, 0x56, - 0x59, 0x9b, 0xe2, 0xe0, 0xcf, 0x50, 0xdb, 0xf4, 0x79, 0x94, 0x4d, 0xc0, 0x66, 0x7d, 0x6d, 0x9e, - 0xc5, 0xb0, 0x8c, 0xd3, 0x4a, 0x81, 0x07, 0xa8, 0x6d, 0x46, 0xe3, 0xb1, 0x8c, 0xc1, 0x6d, 0x5b, - 0xf5, 0x75, 0xa3, 0x7c, 0x54, 0xc6, 0x68, 0xb5, 0x8b, 0x7f, 0x46, 0xb7, 0x94, 0x66, 0xa9, 0x16, - 0xf1, 0xf8, 0x1b, 0x60, 0xa3, 0x89, 0x88, 0x61, 0x08, 0x5c, 0xc6, 0x23, 0x65, 0x5b, 0xb9, 0x11, - 0x7c, 0x50, 0xe4, 0xbd, 0x5b, 0xc3, 0x7a, 0x09, 0x5d, 0xe7, 0xc5, 0x4f, 0xd0, 0x4d, 0x2e, 0x63, - 0x9e, 0xa5, 0x29, 0xc4, 0xfc, 0xfc, 0x27, 0x39, 0x11, 0xfc, 0xdc, 0x36, 0xf4, 0x5a, 0x40, 0xca, - 0xbc, 0x6f, 0xde, 0x5f, 0x16, 0xfc, 0x57, 0x17, 0xa4, 0xab, 0x20, 0x7c, 0x07, 0x6d, 0xab, 0x4c, - 0x25, 0x10, 0x8f, 0xdc, 0xcd, 0x3d, 0x67, 0xd0, 0x0e, 0x3a, 0x45, 0xde, 0xdb, 0x1e, 0x4e, 0x43, - 0x74, 0xb6, 0x87, 0x9f, 0xa2, 0xce, 0x33, 0x19, 0x3e, 0x82, 0x28, 0x99, 0x30, 0x0d, 0x6e, 0xcb, - 0x36, 0xfb, 0x93, 0xf5, 0x1d, 0x39, 0x9c, 0x8b, 0xed, 0x78, 0xbe, 0x57, 0x66, 0xda, 0x59, 0xd8, - 0xa0, 0x8b, 0x48, 0xfc, 0x2b, 0xea, 0xaa, 0x8c, 0x73, 0x50, 0xea, 0x38, 0x9b, 0x1c, 0xca, 0x50, - 0x7d, 0x27, 0x94, 0x96, 0xe9, 0xf9, 0x91, 0x88, 0x84, 0x76, 0xb7, 0xf6, 0x9c, 0x41, 0x2b, 0xf0, - 0x8a, 0xbc, 0xd7, 0x1d, 0xae, 0x55, 0xd1, 0x57, 0x10, 0x30, 0x45, 0xbb, 0xc7, 0x4c, 0x4c, 0x60, - 0xb4, 0xc2, 0xde, 0xb6, 0xec, 0x6e, 0x91, 0xf7, 0x76, 0x1f, 0xd4, 0x2a, 0xe8, 0x1a, 0x67, 0xff, - 0xaf, 0x26, 0xba, 0xf1, 0xd2, 0x9b, 0x83, 0x7f, 0x40, 0x5b, 0x8c, 0x6b, 0x71, 0x66, 0x26, 0xcb, - 0x0c, 0xed, 0xed, 0xc5, 0x12, 0x99, 0xdb, 0x6f, 0x7e, 0x13, 0x50, 0x38, 0x06, 0xd3, 0x09, 0x98, - 0xbf, 0x6e, 0xf7, 0xac, 0x95, 0x96, 0x08, 0x3c, 0x41, 0x3b, 0x13, 0xa6, 0xf4, 0x6c, 0x28, 0xcd, - 0xc8, 0xd9, 0x26, 0x75, 0x0e, 0x3e, 0x7d, 0xbd, 0xd7, 0xcc, 0x38, 0x82, 0xf7, 0x8b, 0xbc, 0xb7, - 0x73, 0xb4, 0xc4, 0xa1, 0x2b, 0x64, 0x9c, 0x22, 0x6c, 0x63, 0x55, 0x09, 0xed, 0x79, 0xad, 0x37, - 0x3e, 0x6f, 0xb7, 0xc8, 0x7b, 0xf8, 0x68, 0x85, 0x44, 0x6b, 0xe8, 0xfd, 0x0b, 0x07, 0x2d, 0x4e, - 0xc4, 0x5b, 
0xb8, 0x5c, 0x7f, 0x41, 0x6d, 0x3d, 0x9b, 0xe2, 0xe6, 0x9b, 0x4e, 0x71, 0x75, 0x4f, - 0x54, 0x23, 0x5c, 0xc1, 0xcc, 0xdd, 0xf8, 0xee, 0x92, 0xfe, 0x2d, 0x3c, 0xce, 0xdd, 0x97, 0xbe, - 0x15, 0x1f, 0xd6, 0x3d, 0x0a, 0x79, 0xc5, 0x27, 0x22, 0xb8, 0x7b, 0x71, 0xe9, 0x35, 0x9e, 0x5f, - 0x7a, 0x8d, 0x17, 0x97, 0x5e, 0xe3, 0xb7, 0xc2, 0x73, 0x2e, 0x0a, 0xcf, 0x79, 0x5e, 0x78, 0xce, - 0x8b, 0xc2, 0x73, 0xfe, 0x29, 0x3c, 0xe7, 0x8f, 0x7f, 0xbd, 0xc6, 0x63, 0x77, 0xdd, 0x5f, 0x8b, - 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xf2, 0x8b, 0xe9, 0x8e, 0x08, 0x00, 0x00, + // 787 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0xbd, 0x49, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, + 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x72, + 0xa9, 0x2a, 0xd4, 0xd9, 0xd9, 0x17, 0x67, 0x9a, 0xdd, 0x9d, 0xd5, 0xce, 0x6c, 0xa4, 0xdc, 0xb8, + 0x70, 0xe7, 0xbb, 0x70, 0xe7, 0x9c, 0x63, 0x6f, 0xf4, 0xb4, 0x22, 0xcb, 0xb7, 0xe0, 0x84, 0x66, + 0xbc, 0xb1, 0x5d, 0x7b, 0xdd, 0x84, 0x4b, 0x6f, 0x9e, 0x37, 0xff, 0xff, 0x6f, 0x9e, 0xde, 0x7b, + 0xfb, 0x8c, 0x1e, 0x9c, 0x7c, 0xad, 0x88, 0x90, 0xfe, 0x49, 0x11, 0x42, 0x9e, 0x82, 0x06, 0xe5, + 0x9f, 0x42, 0x1a, 0xc9, 0xdc, 0xaf, 0x2f, 0x58, 0x26, 0xfc, 0x90, 0x69, 0x7e, 0xec, 0x9f, 0xee, + 0x85, 0xa0, 0xd9, 0x9e, 0x3f, 0x86, 0x14, 0x72, 0xa6, 0x21, 0x22, 0x59, 0x2e, 0xb5, 0xc4, 0xee, + 0x44, 0x49, 0x58, 0x26, 0x88, 0x55, 0x92, 0x5a, 0xd9, 0xfb, 0x7c, 0x2c, 0xf4, 0x71, 0x11, 0x12, + 0x2e, 0x13, 0x7f, 0x2c, 0xc7, 0xd2, 0xb7, 0x86, 0xb0, 0x38, 0xb2, 0x27, 0x7b, 0xb0, 0xbf, 0x26, + 0xa0, 0xde, 0xdd, 0x86, 0x27, 0x17, 0x5f, 0xeb, 0x0d, 0xe6, 0x44, 0x5c, 0xe6, 0xd0, 0xa4, 0xf9, + 0x72, 0xa6, 0x49, 0x18, 0x3f, 0x16, 0x29, 0xe4, 0x67, 0x7e, 0x76, 0x32, 0x36, 0x01, 0xe5, 0x27, + 0xa0, 0x59, 0x93, 0xcb, 0x5f, 0xe5, 0xca, 0x8b, 0x54, 0x8b, 0x04, 0x96, 0x0c, 0x5f, 0x5d, 0x65, + 0x50, 0xfc, 0x18, 0x12, 0xb6, 0xe8, 0x1b, 0xfc, 0xb6, 0x86, 0xb6, 0x1e, 0xe6, 0x32, 0x3d, 0x90, + 0x21, 0x7e, 0x8e, 0x3a, 0x26, 0x9f, 0x88, 0x69, 0xe6, 0x3a, 0xbb, 0xce, 0xb0, 0xbb, 0xff, 0x05, + 0x99, 0xd5, 0x73, 0x8a, 0x25, 0xd9, 0xc9, 0xd8, 0x04, 0x14, 0x31, 0x6a, 0x72, 0xba, 0x47, 0x1e, + 0x87, 0x2f, 0x80, 0xeb, 0x1f, 0x41, 0xb3, 0x00, 0x9f, 0x97, 0xfd, 0x56, 0x55, 0xf6, 0xd1, 0x2c, + 0x46, 0xa7, 0x54, 0xfc, 0x2d, 0xda, 0x50, 0x19, 0x70, 0x77, 0xcd, 0xd2, 0xef, 0x91, 0x55, 0xdd, + 0x22, 0x75, 0x4a, 0xa3, 0x0c, 0x78, 0x70, 0xb3, 0x46, 0x6e, 0x98, 0x13, 0xb5, 0x00, 0xfc, 0x18, + 0x6d, 0x2a, 0xcd, 0x74, 0xa1, 0xdc, 0x75, 0x8b, 0xfa, 0xf8, 0x6a, 0x94, 0x95, 0x07, 0xef, 0xd4, + 0xb0, 0xcd, 0xc9, 0x99, 0xd6, 0x98, 0xc1, 0x1f, 0x0e, 0xea, 0xd6, 0xca, 0x43, 0xa1, 0x34, 0x7e, + 0xb6, 0x54, 0x0b, 0x72, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x76, 0xfd, 0x52, 0xe7, 0x32, 0x32, + 0x57, 0x87, 0x47, 0xa8, 0x2d, 0x34, 0x24, 0xca, 0x5d, 0xdb, 0x5d, 0x1f, 0x76, 0xf7, 0x3f, 0xba, + 0x32, 0xfb, 0xe0, 0x56, 0x4d, 0x6b, 0x7f, 0x6f, 0x7c, 0x74, 0x62, 0x1f, 0xfc, 0xb5, 0x31, 0xcd, + 0xda, 0x14, 0x07, 0x7f, 0x86, 0x3a, 0xa6, 0xcf, 0x51, 0x11, 0x83, 0xcd, 0xfa, 0xc6, 0x2c, 0x8b, + 0x51, 0x1d, 0xa7, 0x53, 0x05, 0x1e, 0xa2, 0x8e, 0x19, 0x8d, 0xa7, 0x32, 0x05, 0xb7, 0x63, 0xd5, + 0x37, 0x8d, 0xf2, 0x49, 0x1d, 0xa3, 0xd3, 0x5b, 0xfc, 0x33, 0xba, 0xa3, 0x34, 0xcb, 0xb5, 0x48, + 0xc7, 0xdf, 0x00, 0x8b, 0x62, 0x91, 0xc2, 0x08, 0xb8, 0x4c, 0x23, 0x65, 0x5b, 0xb9, 0x1e, 0x7c, + 0x50, 0x95, 0xfd, 0x3b, 0xa3, 0x66, 0x09, 0x5d, 0xe5, 0xc5, 0xcf, 0xd0, 0x6d, 0x2e, 0x53, 0x5e, + 
0xe4, 0x39, 0xa4, 0xfc, 0xec, 0x27, 0x19, 0x0b, 0x7e, 0x66, 0x1b, 0x7a, 0x23, 0x20, 0x75, 0xde, + 0xb7, 0x1f, 0x2e, 0x0a, 0xfe, 0x6d, 0x0a, 0xd2, 0x65, 0x10, 0xbe, 0x87, 0xb6, 0x54, 0xa1, 0x32, + 0x48, 0x23, 0x77, 0x63, 0xd7, 0x19, 0x76, 0x82, 0x6e, 0x55, 0xf6, 0xb7, 0x46, 0x93, 0x10, 0xbd, + 0xbc, 0xc3, 0xcf, 0x51, 0xf7, 0x85, 0x0c, 0x9f, 0x40, 0x92, 0xc5, 0x4c, 0x83, 0xdb, 0xb6, 0xcd, + 0xfe, 0x64, 0x75, 0x47, 0x0e, 0x66, 0x62, 0x3b, 0x9e, 0xef, 0xd5, 0x99, 0x76, 0xe7, 0x2e, 0xe8, + 0x3c, 0x12, 0xff, 0x82, 0x7a, 0xaa, 0xe0, 0x1c, 0x94, 0x3a, 0x2a, 0xe2, 0x03, 0x19, 0xaa, 0xef, + 0x84, 0xd2, 0x32, 0x3f, 0x3b, 0x14, 0x89, 0xd0, 0xee, 0xe6, 0xae, 0x33, 0x6c, 0x07, 0x5e, 0x55, + 0xf6, 0x7b, 0xa3, 0x95, 0x2a, 0xfa, 0x06, 0x02, 0xa6, 0x68, 0xe7, 0x88, 0x89, 0x18, 0xa2, 0x25, + 0xf6, 0x96, 0x65, 0xf7, 0xaa, 0xb2, 0xbf, 0xf3, 0xa8, 0x51, 0x41, 0x57, 0x38, 0x07, 0x7f, 0xae, + 0xa1, 0x5b, 0xaf, 0x7d, 0x39, 0xf8, 0x07, 0xb4, 0xc9, 0xb8, 0x16, 0xa7, 0x66, 0xb2, 0xcc, 0xd0, + 0xde, 0x9d, 0x2f, 0x91, 0xd9, 0x7e, 0xb3, 0x4d, 0x40, 0xe1, 0x08, 0x4c, 0x27, 0x60, 0xf6, 0xb9, + 0x3d, 0xb0, 0x56, 0x5a, 0x23, 0x70, 0x8c, 0xb6, 0x63, 0xa6, 0xf4, 0xe5, 0x50, 0x9a, 0x91, 0xb3, + 0x4d, 0xea, 0xee, 0x7f, 0x7a, 0xbd, 0xcf, 0xcc, 0x38, 0x82, 0xf7, 0xab, 0xb2, 0xbf, 0x7d, 0xb8, + 0xc0, 0xa1, 0x4b, 0x64, 0x9c, 0x23, 0x6c, 0x63, 0xd3, 0x12, 0xda, 0xf7, 0xda, 0xff, 0xfb, 0xbd, + 0x9d, 0xaa, 0xec, 0xe3, 0xc3, 0x25, 0x12, 0x6d, 0xa0, 0x9b, 0x85, 0xf2, 0xee, 0xc2, 0xa8, 0xbc, + 0x85, 0x05, 0x7b, 0xff, 0xb5, 0x05, 0xfb, 0x61, 0xd3, 0x14, 0x93, 0x37, 0xec, 0xd5, 0xe0, 0xfe, + 0xf9, 0x85, 0xd7, 0x7a, 0x79, 0xe1, 0xb5, 0x5e, 0x5d, 0x78, 0xad, 0x5f, 0x2b, 0xcf, 0x39, 0xaf, + 0x3c, 0xe7, 0x65, 0xe5, 0x39, 0xaf, 0x2a, 0xcf, 0xf9, 0xbb, 0xf2, 0x9c, 0xdf, 0xff, 0xf1, 0x5a, + 0x4f, 0xdd, 0x55, 0xff, 0xc7, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x61, 0x72, 0xc3, 0xe0, 0xc3, + 0x07, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -517,49 +487,6 @@ func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -700,19 +627,6 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *JobTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -794,17 +708,6 @@ func (this *CronJobStatus) String() string { }, "") return s } -func (this *JobTemplate) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *JobTemplateSpec) String() string { if this == nil { return "nil" @@ -1507,122 +1410,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index d8386a8f5..ac774f19a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -73,7 +73,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. 
// More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -84,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -127,19 +127,6 @@ message CronJobStatus { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 226de49f4..9382ca23f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -44,7 +44,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, &CronJob{}, &CronJobList{}, ) diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index 4c0d69dd6..976752a92 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -22,24 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.8 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. @@ -113,7 +95,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. 
// More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -124,6 +105,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 5716bbb86..3b3eafe8c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. 
More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", @@ -75,16 +75,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c3a3494c4..2c8570332 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -158,33 +158,6 @@ func (in *CronJobStatus) DeepCopy() *CronJobStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 2836b3b01..b57e9f1b8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,21 +72,3 @@ func (in *CronJobList) APILifecycleReplacement() schema.GroupVersionKind { func (in *CronJobList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *JobTemplate) APILifecycleIntroduced() (major, minor int) { - return 1, 8 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *JobTemplate) APILifecycleDeprecated() (major, minor int) { - return 1, 22 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *JobTemplate) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index af5efb516..92b2018e7 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -274,8 +274,9 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // +enum diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 0dc8a4c69..4bdf39ebb 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_CertificateSigningRequest = map[string]string{ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go similarity index 61% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go rename to vendor/k8s.io/api/certificates/v1alpha1/doc.go index 17c60895f..d83d0e820 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to -the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate -objects which implement the Injectable interfaces. -*/ -package inject +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=certificates.k8s.io + +package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go new file mode 100644 index 000000000..546ecbefb --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -0,0 +1,831 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{0} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{1} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{2} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_8915b0d419f9eda6) +} + +var fileDescriptor_8915b0d419f9eda6 = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x77, 
0x6a, 0x0b, 0xed, 0x44, 0x41, 0x56, 0x90, 0x90, 0xc3, 0x34, 0xe4, 0xd4, 0x8b, + 0x33, 0x26, 0x54, 0xe9, 0x79, 0x05, 0xa1, 0xe0, 0x0f, 0xd8, 0x7a, 0xb1, 0x78, 0x70, 0x32, 0x79, + 0xdd, 0x8c, 0xc9, 0xee, 0x0e, 0x33, 0xb3, 0x01, 0x6f, 0x82, 0xff, 0x80, 0x7f, 0x56, 0x8e, 0xd5, + 0x53, 0x4f, 0xc5, 0xac, 0xff, 0x88, 0xcc, 0x64, 0x93, 0x5d, 0x5c, 0x25, 0xd2, 0xdb, 0xbe, 0x1f, + 0x9f, 0xef, 0x7b, 0xdf, 0xb7, 0x0c, 0x3e, 0x9f, 0x9d, 0x19, 0x2a, 0x73, 0x36, 0x2b, 0xc6, 0xa0, + 0x33, 0xb0, 0x60, 0xd8, 0x02, 0xb2, 0x49, 0xae, 0x59, 0x55, 0xe0, 0x4a, 0x32, 0x01, 0xda, 0xca, + 0x2b, 0x29, 0xb8, 0x2f, 0x0f, 0xf9, 0x5c, 0x4d, 0xf9, 0x90, 0x25, 0x90, 0x81, 0xe6, 0x16, 0x26, + 0x54, 0xe9, 0xdc, 0xe6, 0x61, 0x7f, 0x4d, 0x50, 0xae, 0x24, 0x6d, 0x12, 0x74, 0x43, 0xf4, 0x9e, + 0x24, 0xd2, 0x4e, 0x8b, 0x31, 0x15, 0x79, 0xca, 0x92, 0x3c, 0xc9, 0x99, 0x07, 0xc7, 0xc5, 0x95, + 0x8f, 0x7c, 0xe0, 0xbf, 0xd6, 0x82, 0xbd, 0xd3, 0x7a, 0x85, 0x94, 0x8b, 0xa9, 0xcc, 0x40, 0x7f, + 0x66, 0x6a, 0x96, 0xb8, 0x84, 0x61, 0x29, 0x58, 0xce, 0x16, 0xad, 0x35, 0x7a, 0xec, 0x5f, 0x94, + 0x2e, 0x32, 0x2b, 0x53, 0x68, 0x01, 0xcf, 0x77, 0x01, 0x46, 0x4c, 0x21, 0xe5, 0x7f, 0x72, 0x83, + 0x1f, 0x08, 0x87, 0x2f, 0xe6, 0x85, 0xb1, 0xa0, 0xdf, 0xe9, 0xc2, 0xd8, 0xa8, 0xc8, 0x26, 0x73, + 0x08, 0x3f, 0xe2, 0x43, 0xb7, 0xda, 0x84, 0x5b, 0xde, 0x45, 0x7d, 0x74, 0xd2, 0x19, 0x3d, 0xa5, + 0xf5, 0x65, 0xb6, 0x13, 0xa8, 0x9a, 0x25, 0x2e, 0x61, 0xa8, 0xeb, 0xa6, 0x8b, 0x21, 0x7d, 0x3b, + 0xfe, 0x04, 0xc2, 0xbe, 0x06, 0xcb, 0xa3, 0x70, 0x79, 0x7b, 0x1c, 0x94, 0xb7, 0xc7, 0xb8, 0xce, + 0xc5, 0x5b, 0xd5, 0xf0, 0x12, 0xef, 0x1b, 0x05, 0xa2, 0xbb, 0xe7, 0xd5, 0xcf, 0xe8, 0xae, 0xbb, + 0xd3, 0xf6, 0x96, 0x17, 0x0a, 0x44, 0x74, 0xbf, 0x9a, 0xb2, 0xef, 0xa2, 0xd8, 0x6b, 0x0e, 0xbe, + 0x23, 0xfc, 0xb8, 0xdd, 0xfe, 0x4a, 0x1a, 0x1b, 0x7e, 0x68, 0x19, 0xa3, 0xff, 0x67, 0xcc, 0xd1, + 0xde, 0xd6, 0xc3, 0x6a, 0xe0, 0xe1, 0x26, 0xd3, 0x30, 0xf5, 0x1e, 0x1f, 0x48, 0x0b, 0xa9, 0xe9, + 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0xa7, 0x77, 0x71, 0x15, 0x3d, 0xa8, 0x06, 0x1c, 0x9c, 0x3b, + 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0x57, 0x4f, 0xce, 0x74, 0x38, 0xc2, 0xd8, 0xc8, 0x24, 0x03, + 0xfd, 0x86, 0xa7, 0xe0, 0x5d, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0x56, 0xe2, 0x46, 0x57, 0xf8, 0x0c, + 0x77, 0x6c, 0x2d, 0xe3, 0xff, 0xc2, 0x51, 0xf4, 0xa8, 0x82, 0x3a, 0x8d, 0x09, 0x71, 0xb3, 0x2f, + 0x7a, 0xb9, 0x5c, 0x91, 0xe0, 0x7a, 0x45, 0x82, 0x9b, 0x15, 0x09, 0xbe, 0x94, 0x04, 0x2d, 0x4b, + 0x82, 0xae, 0x4b, 0x82, 0x6e, 0x4a, 0x82, 0x7e, 0x96, 0x04, 0x7d, 0xfb, 0x45, 0x82, 0xcb, 0xfe, + 0xae, 0x67, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x05, 0xe9, 0xaa, 0x07, 0xb2, 0x03, 0x00, 0x00, +} + +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + 
return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustBundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto new file mode 100644 index 000000000..b0ebc4bd4 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.certificates.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/certificates/v1alpha1"; + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +message ClusterTrustBundle { + // metadata contains the object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the signer (if any) and trust anchors. + optional ClusterTrustBundleSpec spec = 2; +} + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +message ClusterTrustBundleList { + // metadata contains the list metadata. + // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a collection of ClusterTrustBundle objects + repeated ClusterTrustBundle items = 2; +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +message ClusterTrustBundleSpec { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + optional string signerName = 1; + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. 
+ // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. + // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + optional string trustBundle = 2; +} + diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go new file mode 100644 index 000000000..7288ed9a3 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterTrustBundle{}, + &ClusterTrustBundleList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go new file mode 100644 index 000000000..1a9fda011 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +type ClusterTrustBundle struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the signer (if any) and trust anchors. + Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +type ClusterTrustBundleSpec struct { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"` + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. 
+ // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +type ClusterTrustBundleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of ClusterTrustBundle objects + Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..bff649e3c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterTrustBundle = map[string]string{ + "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. 
Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the signer (if any) and trust anchors.", +} + +func (ClusterTrustBundle) SwaggerDoc() map[string]string { + return map_ClusterTrustBundle +} + +var map_ClusterTrustBundleList = map[string]string{ + "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of ClusterTrustBundle objects", +} + +func (ClusterTrustBundleList) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleList +} + +var map_ClusterTrustBundleSpec = map[string]string{ + "": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", +} + +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..30a4dc1e8 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..dfafa656c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index e246fba02..f70f01ef7 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -124,8 +124,10 @@ message CertificateSigningRequestSpec { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index fe7aab970..7e5a5c198 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct { // allowedUsages specifies a set of usage contexts the key will be // valid for. 
- // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -229,8 +231,9 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index d3f318150..f9ab1f13d 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index b1efb737f..36fce60f2 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ace..b0e1d0682 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -30,7 +30,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -43,7 +43,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -69,6 +69,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f1440430..f3720eca0 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 85faa3b09..92c8918b8 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca8..3a3d5f32e 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -33,7 +33,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -46,7 +46,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -75,6 +75,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265d..78ca4e393 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1d..61f86f850 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 @@ -144,8 +144,19 @@ const ( // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any - // other value is treated as "Disabled". - AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable + // Topology Aware Hints for a Service. This may be set to "Auto" or + // "Disabled". Any other value is treated as "Disabled". This annotation has + // been deprecated in favor of the "service.kubernetes.io/topology-mode" + // annotation. 
+ DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + + // AnnotationTopologyMode can be used to enable or disable Topology Aware + // Routing for a Service. Well known values are "Auto" and "Disabled". + // Implementations may choose to develop new topology approaches, exposing + // them with domain-prefixed values. For example, "example.com/lowest-rtt" + // could be a valid implementation-specific value for this annotation. These + // heuristics will often populate topology hints on EndpointSlices, but that + // is not a requirement. + AnnotationTopologyMode = "service.kubernetes.io/topology-mode" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index a8df2b222..c76646296 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -889,10 +889,38 @@ func (m *ContainerPort) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } +func (*ContainerResizePolicy) ProtoMessage() {} +func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResizePolicy.Merge(m, src) +} +func (m *ContainerResizePolicy) XXX_Size() int { + return m.Size() +} +func (m *ContainerResizePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +976,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1004,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return 
fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1032,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1060,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1088,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1116,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{41} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainerCommon 
proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) 
ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return 
fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2376,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 
+2404,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2432,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2460,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{86} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2488,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2516,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2544,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2572,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2600,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2628,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{91} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2656,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2684,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2712,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2740,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2768,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2796,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2880,7 @@ var 
xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +2992,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3020,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3048,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3076,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3104,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = 
PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3132,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func 
(*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3440,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3468,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3496,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3524,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = 
PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3552,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3580,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3608,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3636,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3692,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3720,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodLogOptions) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodSignature 
proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4056,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4084,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4112,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4140,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4168,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4196,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4224,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{148} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4252,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4280,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4308,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4336,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4364,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4392,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4420,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4448,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4476,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4504,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4532,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4560,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4588,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4672,7 @@ var xxx_messageInfo_ResourceClaim 
proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4700,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4728,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4756,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4784,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4840,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4868,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-4868,7 +4896,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4924,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4952,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4980,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5008,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5036,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5064,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5092,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5120,7 @@ var xxx_messageInfo_SecretList 
proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5148,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5176,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5204,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5232,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5260,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5288,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5344,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = 
ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5372,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5400,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5428,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5456,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5484,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5512,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5540,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5568,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m 
*StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5596,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5624,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5652,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5680,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5708,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5736,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5764,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5792,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() 
{ *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5820,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5848,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5876,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +5904,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +5932,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +5960,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +5988,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6016,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func 
(*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6044,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6072,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6104,11 +6132,13 @@ func init() { proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6320,917 +6350,925 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14547 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x69, 0x8c, 0x24, 0xd7, - 0x79, 0x98, 0xaa, 0x7b, 0xae, 0xfe, 0xe6, 0x7e, 0xb3, 0xbb, 0x9c, 0x1d, 0x72, 0x77, 0x96, 0x45, - 0x72, 0xb9, 0x14, 0xc9, 0x19, 0x2d, 0x0f, 0x89, 0x26, 0x25, 0x5a, 0x73, 0xee, 0x36, 0x77, 0x67, - 0xb6, 0xf9, 0x7a, 0x76, 0x57, 0x07, 0x25, 0xa8, 0xa6, 0xfb, 0xcd, 0x4c, 0x69, 0xba, 0xab, 0x9a, - 0x55, 0xd5, 0xb3, 0x3b, 0x8c, 0x84, 0x38, 0xf2, 0x29, 0xdb, 0x09, 0x84, 0xc0, 0x39, 0x20, 0x1b, - 0x46, 0xe0, 0x38, 0xb6, 0x15, 0xe5, 0x52, 0xe4, 0xd8, 0x8e, 0xe5, 0xd8, 0xce, 0xed, 0x04, 0x81, - 0xe3, 0x18, 0x88, 0x65, 0xc0, 0xc8, 0xc4, 0x5e, 0x07, 0x30, 0x04, 0x24, 0xb6, 0x73, 0x01, 0xc9, - 0xc4, 0x89, 0x83, 0x77, 0xd6, 0x7b, 0x75, 0x74, 0xf7, 0x2c, 0x67, 0x47, 0x94, 0xc0, 0x7f, 0xdd, - 0xdf, 0xf7, 0xbd, 0xef, 0xbd, 0x7a, 0xe7, 0xf7, 0xbe, 0xef, 0x7b, 0xdf, 0x07, 0xaf, 0xec, 0xbe, - 0x14, 0xce, 0xb9, 0xfe, 0xfc, 0x6e, 0x7b, 0x93, 0x04, 0x1e, 0x89, 0x48, 0x38, 0xbf, 
0x47, 0xbc, - 0xba, 0x1f, 0xcc, 0x0b, 0x84, 0xd3, 0x72, 0xe7, 0x6b, 0x7e, 0x40, 0xe6, 0xf7, 0x2e, 0xcf, 0x6f, - 0x13, 0x8f, 0x04, 0x4e, 0x44, 0xea, 0x73, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xcd, 0x9c, 0xd3, - 0x72, 0xe7, 0x28, 0xcd, 0xdc, 0xde, 0xe5, 0x99, 0x67, 0xb7, 0xdd, 0x68, 0xa7, 0xbd, 0x39, 0x57, - 0xf3, 0x9b, 0xf3, 0xdb, 0xfe, 0xb6, 0x3f, 0xcf, 0x48, 0x37, 0xdb, 0x5b, 0xec, 0x1f, 0xfb, 0xc3, - 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0x8e, 0xeb, 0x91, 0x60, 0x7f, 0xbe, - 0xb5, 0xbb, 0xcd, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x83, 0x1a, 0x49, 0x56, 0xdc, 0xb1, 0x54, 0x38, - 0xdf, 0x24, 0x91, 0x93, 0xd1, 0xdc, 0x99, 0xf9, 0xbc, 0x52, 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, - 0x9a, 0xf7, 0x77, 0x2b, 0x10, 0xd6, 0x76, 0x48, 0xd3, 0x49, 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, - 0x72, 0x1b, 0xf3, 0xae, 0x17, 0x85, 0x51, 0x90, 0x2c, 0x64, 0x7f, 0xdd, 0x82, 0x0b, 0x0b, 0xb7, - 0xab, 0x2b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, 0xed, 0x56, 0x23, 0x3f, 0x20, 0xb7, - 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0xac, 0x23, 0xd0, 0x33, 0x30, 0xb4, 0xc7, 0xfe, 0x97, 0x97, 0xa7, - 0xad, 0x0b, 0xd6, 0xa5, 0xd2, 0xe2, 0xc4, 0xaf, 0x1d, 0xcc, 0xbe, 0xe7, 0xde, 0xc1, 0xec, 0xd0, - 0x2d, 0x01, 0xc7, 0x8a, 0x02, 0x5d, 0x84, 0x81, 0xad, 0x70, 0x63, 0xbf, 0x45, 0xa6, 0x0b, 0x8c, - 0x76, 0x4c, 0xd0, 0x0e, 0xac, 0x56, 0x29, 0x14, 0x0b, 0x2c, 0x9a, 0x87, 0x52, 0xcb, 0x09, 0x22, - 0x37, 0x72, 0x7d, 0x6f, 0xba, 0x78, 0xc1, 0xba, 0xd4, 0xbf, 0x38, 0x29, 0x48, 0x4b, 0x15, 0x89, - 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x0d, 0xaf, 0xb1, 0x3f, 0xdd, 0x77, 0xc1, 0xba, - 0x34, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x17, 0x0b, 0x30, 0xb4, 0xb0, 0xb5, 0xe5, - 0x7a, 0x6e, 0xb4, 0x8f, 0x6e, 0xc1, 0x88, 0xe7, 0xd7, 0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xf8, 0xb9, - 0x0b, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d, 0x4e, 0xdc, 0x3b, 0x98, 0x1d, 0xd1, 0x21, - 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, 0x60, 0x6c, 0x67, 0xb3, 0xd8, 0x56, - 0x62, 0xb2, 0xc5, 0xf1, 0x7b, 0x07, 0xb3, 0xc3, 0x1a, 0x00, 0xeb, 0x4c, 0xd0, 0x26, 0x8c, 0xd3, - 0xbf, 0x5e, 0xe4, 0x2a, 0xbe, 0x45, 0xc6, 0xf7, 0xb1, 0x3c, 0xbe, 0x1a, 0xe9, 0xe2, 0xd4, 0xbd, - 0x83, 0xd9, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x6f, 0xc1, 0xd8, 0x42, 0x14, 0x39, 0xb5, 0x1d, - 0x52, 0xe7, 0x23, 0x88, 0x5e, 0x80, 0x3e, 0xcf, 0x69, 0x12, 0x31, 0xbe, 0x17, 0x44, 0xc7, 0xf6, - 0xad, 0x3b, 0x4d, 0x72, 0x78, 0x30, 0x3b, 0x71, 0xd3, 0x73, 0xdf, 0x6c, 0x8b, 0x59, 0x41, 0x61, - 0x98, 0x51, 0xa3, 0xe7, 0x00, 0xea, 0x64, 0xcf, 0xad, 0x91, 0x8a, 0x13, 0xed, 0x88, 0xf1, 0x46, - 0xa2, 0x2c, 0x2c, 0x2b, 0x0c, 0xd6, 0xa8, 0xec, 0xbb, 0x50, 0x5a, 0xd8, 0xf3, 0xdd, 0x7a, 0xc5, - 0xaf, 0x87, 0x68, 0x17, 0xc6, 0x5b, 0x01, 0xd9, 0x22, 0x81, 0x02, 0x4d, 0x5b, 0x17, 0x8a, 0x97, - 0x86, 0x9f, 0xbb, 0x94, 0xf9, 0xb1, 0x26, 0xe9, 0x8a, 0x17, 0x05, 0xfb, 0x8b, 0x0f, 0x89, 0xfa, - 0xc6, 0x13, 0x58, 0x9c, 0xe4, 0x6c, 0xff, 0xb3, 0x02, 0x9c, 0x5e, 0x78, 0xab, 0x1d, 0x90, 0x65, - 0x37, 0xdc, 0x4d, 0xce, 0xf0, 0xba, 0x1b, 0xee, 0xae, 0xc7, 0x3d, 0xa0, 0xa6, 0xd6, 0xb2, 0x80, - 0x63, 0x45, 0x81, 0x9e, 0x85, 0x41, 0xfa, 0xfb, 0x26, 0x2e, 0x8b, 0x4f, 0x9e, 0x12, 0xc4, 0xc3, - 0xcb, 0x4e, 0xe4, 0x2c, 0x73, 0x14, 0x96, 0x34, 0x68, 0x0d, 0x86, 0x6b, 0x6c, 0x41, 0x6e, 0xaf, - 0xf9, 0x75, 0xc2, 0x06, 0xb3, 0xb4, 0xf8, 0x34, 0x25, 0x5f, 0x8a, 0xc1, 0x87, 0x07, 0xb3, 0xd3, - 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, 0xd5, 0xc7, 0x38, 0x41, 0xc6, - 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x7e, 0xb6, 0x54, 0x46, 0xb2, 0x97, 0x09, 0xba, 0x0c, 0x7d, 0xbb, - 0xae, 0x57, 
0x9f, 0x1e, 0x60, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xe6, 0x7a, 0xf5, 0xc3, 0x83, 0xd9, - 0x49, 0xa3, 0x39, 0x14, 0x88, 0x19, 0xa9, 0xfd, 0xdf, 0x2d, 0x98, 0x65, 0xb8, 0x55, 0xb7, 0x41, - 0x2a, 0x24, 0x08, 0xdd, 0x30, 0x22, 0x5e, 0x64, 0x74, 0xe8, 0x73, 0x00, 0x21, 0xa9, 0x05, 0x24, - 0xd2, 0xba, 0x54, 0x4d, 0x8c, 0xaa, 0xc2, 0x60, 0x8d, 0x8a, 0x6e, 0x08, 0xe1, 0x8e, 0x13, 0xb0, - 0xf9, 0x25, 0x3a, 0x56, 0x6d, 0x08, 0x55, 0x89, 0xc0, 0x31, 0x8d, 0xb1, 0x21, 0x14, 0xbb, 0x6d, - 0x08, 0xe8, 0x43, 0x30, 0x1e, 0x57, 0x16, 0xb6, 0x9c, 0x9a, 0xec, 0x40, 0xb6, 0x64, 0xaa, 0x26, - 0x0a, 0x27, 0x69, 0xed, 0xbf, 0x69, 0x89, 0xc9, 0x43, 0xbf, 0xfa, 0x1d, 0xfe, 0xad, 0xf6, 0x2f, - 0x58, 0x30, 0xb8, 0xe8, 0x7a, 0x75, 0xd7, 0xdb, 0x46, 0x9f, 0x82, 0x21, 0x7a, 0x36, 0xd5, 0x9d, - 0xc8, 0x11, 0xfb, 0xde, 0xfb, 0xb4, 0xb5, 0xa5, 0x8e, 0x8a, 0xb9, 0xd6, 0xee, 0x36, 0x05, 0x84, - 0x73, 0x94, 0x9a, 0xae, 0xb6, 0x1b, 0x9b, 0x9f, 0x26, 0xb5, 0x68, 0x8d, 0x44, 0x4e, 0xfc, 0x39, - 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x1a, 0x0c, 0x44, 0x4e, 0xb0, 0x4d, 0x22, 0xb1, 0x01, 0x66, 0x6e, - 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x35, 0x12, 0x1f, 0x0b, 0x1b, 0xac, 0x28, 0x16, 0x2c, - 0xec, 0xff, 0x3b, 0x08, 0x67, 0x97, 0xaa, 0xe5, 0x9c, 0x79, 0x75, 0x11, 0x06, 0xea, 0x81, 0xbb, - 0x47, 0x02, 0xd1, 0xcf, 0x8a, 0xcb, 0x32, 0x83, 0x62, 0x81, 0x45, 0x2f, 0xc1, 0x08, 0x3f, 0x90, - 0xae, 0x3a, 0x5e, 0xbd, 0x21, 0xbb, 0xf8, 0x94, 0xa0, 0x1e, 0xb9, 0xa5, 0xe1, 0xb0, 0x41, 0x79, - 0xc4, 0x49, 0x75, 0x31, 0xb1, 0x18, 0xf3, 0x0e, 0xbb, 0xcf, 0x5b, 0x30, 0xc1, 0xab, 0x59, 0x88, - 0xa2, 0xc0, 0xdd, 0x6c, 0x47, 0x24, 0x9c, 0xee, 0x67, 0x3b, 0xdd, 0x52, 0x56, 0x6f, 0xe5, 0xf6, - 0xc0, 0xdc, 0xad, 0x04, 0x17, 0xbe, 0x09, 0x4e, 0x8b, 0x7a, 0x27, 0x92, 0x68, 0x9c, 0xaa, 0x16, - 0x7d, 0xb7, 0x05, 0x33, 0x35, 0xdf, 0x8b, 0x02, 0xbf, 0xd1, 0x20, 0x41, 0xa5, 0xbd, 0xd9, 0x70, - 0xc3, 0x1d, 0x3e, 0x4f, 0x31, 0xd9, 0x62, 0x3b, 0x41, 0xce, 0x18, 0x2a, 0x22, 0x31, 0x86, 0xe7, - 0xef, 0x1d, 0xcc, 0xce, 0x2c, 0xe5, 0xb2, 0xc2, 0x1d, 0xaa, 0x41, 0xbb, 0x80, 0xe8, 0x51, 0x5a, - 0x8d, 0x9c, 0x6d, 0x12, 0x57, 0x3e, 0xd8, 0x7b, 0xe5, 0x67, 0xee, 0x1d, 0xcc, 0xa2, 0xf5, 0x14, - 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x09, 0xa7, 0x28, 0x34, 0xf5, 0xad, 0x43, 0xbd, 0x57, 0x37, 0x7d, - 0xef, 0x60, 0xf6, 0xd4, 0x7a, 0x06, 0x13, 0x9c, 0xc9, 0x1a, 0x7d, 0x97, 0x05, 0x67, 0xe3, 0xcf, - 0x5f, 0xb9, 0xdb, 0x72, 0xbc, 0x7a, 0x5c, 0x71, 0xa9, 0xf7, 0x8a, 0xe9, 0x9e, 0x7c, 0x76, 0x29, - 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x45, 0x9b, 0x96, 0xac, 0x1b, 0x7a, 0xaf, 0xfb, 0xa1, - 0x7b, 0x07, 0xb3, 0x53, 0xeb, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x12, 0x9c, 0xce, 0x9c, 0x9d, - 0x68, 0x02, 0x8a, 0xbb, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x53, 0xd0, 0xbf, 0xe7, 0x34, - 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, 0x30, 0xbe, 0x54, - 0x2d, 0xdf, 0xd7, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e, 0x44, 0x8b, 0xb9, 0x87, - 0xe8, 0x9f, 0xcd, 0x58, 0xb2, 0x7d, 0x6c, 0xc9, 0x7e, 0x47, 0xce, 0x92, 0x3d, 0xe6, 0x85, 0xba, - 0x97, 0x33, 0x6b, 0xfb, 0xd9, 0x00, 0x66, 0x4a, 0x48, 0xd7, 0xfd, 0x9a, 0xd3, 0x48, 0x6e, 0xb5, - 0x47, 0x9c, 0xba, 0xc7, 0x33, 0x8e, 0x35, 0x18, 0x59, 0x72, 0x5a, 0xce, 0xa6, 0xdb, 0x70, 0x23, - 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0x8b, 0xa7, 0xef, 0x1d, 0xcc, - 0x16, 0x17, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x8f, 0x29, 0x05, 0x7a, 0x2f, 0xf4, 0xd5, 0x03, - 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x07, 0x7e, 0x2b, 0x41, 0xca, 0x68, 0xec, - 0x5f, 0x2d, 0xc0, 0x23, 0x4b, 0xa4, 
0xb5, 0xb3, 0x5a, 0xcd, 0x39, 0x2f, 0x2e, 0xc1, 0x50, 0xd3, - 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, 0x86, 0x15, 0x16, 0x5d, 0x80, - 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, 0xda, 0x21, 0x09, - 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x43, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, 0x23, 0x88, 0x13, - 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, 0x7b, 0x5a, 0x9a, - 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2, 0xd7, 0x0a, - 0x80, 0x78, 0x17, 0x7e, 0x8b, 0x75, 0xdc, 0xcd, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0x8e, 0xab, 0xf7, - 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb9, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, 0xdc, 0x9d, 0x8f, - 0x26, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x18, 0xa6, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, 0xd9, 0xef, - 0xb8, 0x8f, 0xbd, 0x99, 0xfe, 0xd8, 0x63, 0x98, 0x16, 0xf6, 0xdf, 0xb5, 0x60, 0x78, 0xa9, 0xe1, - 0xb8, 0x4d, 0xf1, 0xa9, 0x4b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, 0x74, 0x73, 0x9b, - 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x59, 0x03, 0xb8, 0x41, 0x9a, 0xad, 0x86, 0x13, - 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x0e, 0x63, 0x4b, 0x0d, - 0x97, 0x78, 0x51, 0xb9, 0xb2, 0xe4, 0x7b, 0x5b, 0xee, 0x36, 0x7a, 0x19, 0xc6, 0x22, 0xb7, 0x49, - 0xfc, 0x76, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xff, 0x22, 0xba, 0x77, 0x30, - 0x3b, 0xb6, 0x61, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x1d, 0x3a, 0xe2, 0x7e, 0xb3, 0xe5, 0x7b, 0xc4, - 0x8b, 0x96, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, 0x7f, 0xf9, 0x45, - 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0x1e, 0xcc, 0x9e, 0x49, 0x97, 0x60, 0x23, 0xcb, 0xca, 0xa0, 0xef, - 0x80, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3e, 0xf5, 0x51, 0x39, 0xfe, 0x55, 0x06, 0x3d, 0x3c, - 0x98, 0x1d, 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x29, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, - 0xe5, 0xf9, 0x3d, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x18, 0xf4, 0x93, 0x20, - 0xf0, 0x03, 0xb1, 0xab, 0x8c, 0x0a, 0xc2, 0xfe, 0x15, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x5b, 0x0b, - 0xc6, 0x55, 0x5b, 0x79, 0x5d, 0x27, 0x70, 0x6f, 0xfa, 0x18, 0x40, 0x4d, 0x7e, 0x60, 0xc8, 0xce, - 0xbb, 0xe1, 0xe7, 0x2e, 0x66, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, 0x39, 0x2b, 0x50, 0x88, 0x35, 0x6e, - 0xf6, 0x3f, 0xb2, 0x60, 0x2a, 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, 0xbe, 0x6a, 0xae, - 0xb7, 0xaf, 0xa2, 0xa5, 0xd9, 0x37, 0xa9, 0xc5, 0x27, 0x21, 0xda, 0x17, 0x5d, 0x85, 0x7e, 0x37, - 0x22, 0x4d, 0xf9, 0x31, 0x8f, 0x75, 0xfc, 0x18, 0xde, 0xaa, 0x78, 0x44, 0xca, 0xb4, 0x24, 0xe6, - 0x0c, 0xec, 0x5f, 0x2d, 0x42, 0x89, 0x4f, 0xdb, 0x35, 0xa7, 0x75, 0x02, 0x63, 0xf1, 0x34, 0x94, - 0xdc, 0x66, 0xb3, 0x1d, 0x39, 0x9b, 0xe2, 0x00, 0x1a, 0xe2, 0x9b, 0x41, 0x59, 0x02, 0x71, 0x8c, - 0x47, 0x65, 0xe8, 0x63, 0x4d, 0xe1, 0x5f, 0xf9, 0x64, 0xf6, 0x57, 0x8a, 0xb6, 0xcf, 0x2d, 0x3b, - 0x91, 0xc3, 0x65, 0x3f, 0x75, 0xf2, 0x51, 0x10, 0x66, 0x2c, 0x90, 0x03, 0xb0, 0xe9, 0x7a, 0x4e, - 0xb0, 0x4f, 0x61, 0xd3, 0x45, 0xc6, 0xf0, 0xd9, 0xce, 0x0c, 0x17, 0x15, 0x3d, 0x67, 0xab, 0x3e, - 0x2c, 0x46, 0x60, 0x8d, 0xe9, 0xcc, 0x07, 0xa0, 0xa4, 0x88, 0x8f, 0x22, 0xc2, 0xcd, 0x7c, 0x08, - 0xc6, 0x13, 0x75, 0x75, 0x2b, 0x3e, 0xa2, 0x4b, 0x80, 0xbf, 0xc8, 0xb6, 0x0c, 0xd1, 0xea, 0x15, - 0x6f, 0x4f, 0xec, 0x9c, 0x6f, 0xc1, 0xa9, 0x46, 0xc6, 0xde, 0x2b, 0xc6, 0xb5, 0xf7, 0xbd, 0xfa, - 0x11, 0xf1, 0xd9, 0xa7, 0xb2, 0xb0, 0x38, 0xb3, 0x0e, 0x2a, 0xd5, 0xf8, 0x2d, 0xba, 0x40, 0x9c, - 0x86, 0x7e, 0x41, 0xb8, 0x21, 0x60, 0x58, 0x61, 0xe9, 0x7e, 
0x77, 0x4a, 0x35, 0xfe, 0x1a, 0xd9, - 0xaf, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x4d, 0x6d, 0xfe, 0x39, 0xde, 0xfb, 0x7c, 0xbb, 0x1c, - 0x16, 0x0c, 0x8a, 0xd7, 0xc8, 0x3e, 0x1f, 0x0a, 0xfd, 0xeb, 0x8a, 0x1d, 0xbf, 0xee, 0x2b, 0x16, - 0x8c, 0xaa, 0xaf, 0x3b, 0x81, 0x7d, 0x61, 0xd1, 0xdc, 0x17, 0xce, 0x75, 0x9c, 0xe0, 0x39, 0x3b, - 0xc2, 0xd7, 0x0a, 0x70, 0x56, 0xd1, 0xd0, 0xdb, 0x0c, 0xff, 0x23, 0x66, 0xd5, 0x3c, 0x94, 0x3c, - 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, 0xea, 0xc5, 0xc7, 0xec, - 0x88, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x45, 0x28, 0xb6, 0xdd, 0xba, 0x38, 0x60, 0xde, 0x27, 0x7b, - 0xfb, 0x66, 0x79, 0xf9, 0xf0, 0x60, 0xf6, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, 0xd9, 0xc2, 0xb9, 0x9b, - 0xe5, 0x65, 0x4c, 0x0b, 0xa3, 0x05, 0x18, 0x97, 0x27, 0xf4, 0x2d, 0x2a, 0x20, 0xfa, 0x9e, 0x38, - 0x87, 0x94, 0xd6, 0x1a, 0x9b, 0x68, 0x9c, 0xa4, 0x47, 0xcb, 0x30, 0xb1, 0xdb, 0xde, 0x24, 0x0d, - 0x12, 0xf1, 0x0f, 0xbe, 0x46, 0xb8, 0x4e, 0xb7, 0x14, 0xdf, 0x25, 0xaf, 0x25, 0xf0, 0x38, 0x55, - 0xc2, 0xfe, 0x53, 0x76, 0x1e, 0x88, 0xde, 0xab, 0x04, 0x3e, 0x9d, 0x58, 0x94, 0xfb, 0x37, 0x73, - 0x3a, 0xf7, 0x32, 0x2b, 0xae, 0x91, 0xfd, 0x0d, 0x9f, 0xde, 0x25, 0xb2, 0x67, 0x85, 0x31, 0xe7, - 0xfb, 0x3a, 0xce, 0xf9, 0x9f, 0x2d, 0xc0, 0x69, 0xd5, 0x03, 0x86, 0xd8, 0xfa, 0xad, 0xde, 0x07, - 0x97, 0x61, 0xb8, 0x4e, 0xb6, 0x9c, 0x76, 0x23, 0x52, 0x06, 0x86, 0x7e, 0x6e, 0x64, 0x5a, 0x8e, - 0xc1, 0x58, 0xa7, 0x39, 0x42, 0xb7, 0xfd, 0xcf, 0x61, 0x76, 0x10, 0x47, 0x0e, 0x9d, 0xe3, 0x6a, - 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x77, 0x9b, 0x54, 0x30, 0x2b, 0x98, 0xf2, 0x56, 0x99, - 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, 0x47, 0x5e, 0x69, - 0x71, 0x98, 0xca, 0x6e, 0x4b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, 0xb0, 0xcd, 0xb5, - 0x2e, 0xa5, 0xc5, 0x21, 0x5a, 0xd3, 0x42, 0xb0, 0x1d, 0x62, 0x06, 0xa5, 0x97, 0xc6, 0x3b, 0x7e, - 0xb0, 0xeb, 0x7a, 0xdb, 0xcb, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x2b, 0x0c, 0xd6, 0xa8, - 0xd0, 0x2a, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, 0xb3, 0x11, 0xf1, - 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, 0x87, 0x41, 0xe2, - 0xed, 0xad, 0x06, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x0a, 0x27, 0xe1, 0xd3, 0x2c, 0x96, 0x51, - 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x3b, 0xa0, 0x48, 0xbc, 0xbd, 0xe9, 0x41, 0xc6, 0x69, 0x26, 0x87, - 0xd3, 0x2d, 0x27, 0x88, 0xf7, 0xfc, 0x15, 0x6f, 0x0f, 0xd3, 0x32, 0xe8, 0xa3, 0x50, 0x92, 0x1b, - 0x46, 0x28, 0xd4, 0x99, 0x99, 0x13, 0x56, 0x6e, 0x33, 0x98, 0xbc, 0xd9, 0x76, 0x03, 0xd2, 0x24, - 0x5e, 0x14, 0xc6, 0x3b, 0xa4, 0xc4, 0x86, 0x38, 0xe6, 0x86, 0x3e, 0x2a, 0x75, 0xe8, 0x6b, 0x7e, - 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0x6b, 0x5e, 0xa6, 0x75, 0xf3, 0x56, 0x4c, 0x97, 0x54, 0xb2, 0xf3, - 0xc2, 0xd8, 0x60, 0x85, 0x3e, 0x01, 0xa3, 0xfc, 0x3f, 0xb7, 0x11, 0x86, 0xd3, 0xa7, 0x19, 0xef, - 0x0b, 0xf9, 0xbc, 0x39, 0xe1, 0xe2, 0x69, 0xc1, 0x7c, 0x54, 0x87, 0x86, 0xd8, 0xe4, 0x86, 0x30, - 0x8c, 0x36, 0xdc, 0x3d, 0xe2, 0x91, 0x30, 0xac, 0x04, 0xfe, 0x26, 0x11, 0x2a, 0xcf, 0xb3, 0xd9, - 0x36, 0x45, 0x7f, 0x93, 0x2c, 0x4e, 0x52, 0x9e, 0xd7, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, - 0x18, 0xbd, 0x63, 0xba, 0x31, 0xd3, 0xe1, 0x6e, 0x4c, 0xd9, 0xbd, 0x0a, 0x1b, 0x85, 0x70, 0x82, - 0x09, 0xba, 0x01, 0x23, 0x61, 0xe4, 0x04, 0x51, 0xbb, 0xc5, 0x99, 0x9e, 0xe9, 0xc6, 0x94, 0x99, - 0xa4, 0xab, 0x5a, 0x11, 0x6c, 0x30, 0x40, 0xaf, 0x41, 0xa9, 0xe1, 0x6e, 0x91, 0xda, 0x7e, 0xad, - 0x41, 0xa6, 0x47, 0x18, 0xb7, 0xcc, 0x4d, 0xe5, 0xba, 0x24, 0xe2, 0x72, 0xae, 0xfa, 
0x8b, 0xe3, - 0xe2, 0xe8, 0x16, 0x9c, 0x89, 0x48, 0xd0, 0x74, 0x3d, 0x87, 0x6e, 0x06, 0xe2, 0x6a, 0xc5, 0x4c, - 0xbd, 0xa3, 0x6c, 0xb5, 0x9d, 0x17, 0xa3, 0x71, 0x66, 0x23, 0x93, 0x0a, 0xe7, 0x94, 0x46, 0x77, - 0x61, 0x3a, 0x03, 0xe3, 0x37, 0xdc, 0xda, 0xfe, 0xf4, 0x29, 0xc6, 0xf9, 0x83, 0x82, 0xf3, 0xf4, - 0x46, 0x0e, 0xdd, 0x61, 0x07, 0x1c, 0xce, 0xe5, 0x8e, 0x6e, 0xc0, 0x38, 0xdb, 0x81, 0x2a, 0xed, - 0x46, 0x43, 0x54, 0x38, 0xc6, 0x2a, 0x7c, 0x42, 0x9e, 0xc7, 0x65, 0x13, 0x7d, 0x78, 0x30, 0x0b, - 0xf1, 0x3f, 0x9c, 0x2c, 0x8d, 0x36, 0x99, 0x55, 0xb1, 0x1d, 0xb8, 0xd1, 0x3e, 0xdd, 0x37, 0xc8, - 0xdd, 0x68, 0x7a, 0xbc, 0xa3, 0x86, 0x45, 0x27, 0x55, 0xa6, 0x47, 0x1d, 0x88, 0x93, 0x0c, 0xe9, - 0x96, 0x1a, 0x46, 0x75, 0xd7, 0x9b, 0x9e, 0xe0, 0xf7, 0x12, 0xb9, 0x23, 0x55, 0x29, 0x10, 0x73, - 0x1c, 0xb3, 0x28, 0xd2, 0x1f, 0x37, 0xe8, 0xc9, 0x35, 0xc9, 0x08, 0x63, 0x8b, 0xa2, 0x44, 0xe0, - 0x98, 0x86, 0x0a, 0x93, 0x51, 0xb4, 0x3f, 0x8d, 0x18, 0xa9, 0xda, 0x58, 0x36, 0x36, 0x3e, 0x8a, - 0x29, 0xdc, 0xde, 0x84, 0x31, 0xb5, 0x11, 0xb2, 0x3e, 0x41, 0xb3, 0xd0, 0xcf, 0xc4, 0x27, 0xa1, - 0x0f, 0x2c, 0xd1, 0x26, 0x30, 0xd1, 0x0a, 0x73, 0x38, 0x6b, 0x82, 0xfb, 0x16, 0x59, 0xdc, 0x8f, - 0x08, 0xbf, 0xd3, 0x17, 0xb5, 0x26, 0x48, 0x04, 0x8e, 0x69, 0xec, 0xff, 0xc7, 0xc5, 0xd0, 0x78, - 0xb7, 0xed, 0xe1, 0x7c, 0x79, 0x06, 0x86, 0x76, 0xfc, 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xfe, 0x58, - 0xf0, 0xbc, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x60, 0xb4, 0xa6, 0x57, 0x20, 0x0e, 0x47, 0xb5, - 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x04, 0x43, 0xcc, 0x4b, 0xa6, 0xe6, 0x37, 0x84, 0xd4, - 0x26, 0x4f, 0xf8, 0xa1, 0x8a, 0x80, 0x1f, 0x6a, 0xbf, 0xb1, 0xa2, 0x46, 0x17, 0x61, 0x80, 0x36, - 0xa1, 0x5c, 0x11, 0xc7, 0x92, 0x52, 0x6d, 0x5d, 0x65, 0x50, 0x2c, 0xb0, 0xf6, 0x5f, 0x2c, 0x68, - 0xbd, 0x4c, 0xef, 0xc3, 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x2d, 0xe4, 0x8f, - 0xa7, 0x3a, 0x9e, 0x51, 0xac, 0xd0, 0x6d, 0x5e, 0x80, 0x9f, 0xa2, 0xe2, 0x0f, 0x96, 0x6c, 0x28, - 0xc7, 0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, 0xc5, 0x1f, 0x2c, - 0xd9, 0xa0, 0x37, 0x00, 0xe4, 0x0a, 0x23, 0x75, 0xe1, 0x9d, 0xf2, 0x4c, 0x77, 0xa6, 0x1b, 0xaa, - 0xcc, 0xe2, 0x18, 0x3d, 0xa3, 0xe3, 0xff, 0x58, 0xe3, 0x67, 0x47, 0x4c, 0x4e, 0x4b, 0x37, 0x06, - 0x7d, 0x9c, 0x4e, 0x71, 0x27, 0x88, 0x48, 0x7d, 0x21, 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x2e, 0x29, - 0x1b, 0x6e, 0x93, 0xe8, 0xcb, 0x41, 0x30, 0xc1, 0x31, 0x3f, 0xfb, 0xe7, 0x8b, 0x30, 0x9d, 0xd7, - 0x5c, 0x3a, 0xe9, 0xc8, 0x5d, 0x37, 0x5a, 0xa2, 0xe2, 0x95, 0x65, 0x4e, 0xba, 0x15, 0x01, 0xc7, - 0x8a, 0x82, 0x8e, 0x7e, 0xe8, 0x6e, 0xcb, 0x3b, 0x66, 0x7f, 0x3c, 0xfa, 0x55, 0x06, 0xc5, 0x02, - 0x4b, 0xe9, 0x02, 0xe2, 0x84, 0xc2, 0xfd, 0x49, 0x9b, 0x25, 0x98, 0x41, 0xb1, 0xc0, 0xea, 0xda, - 0xae, 0xbe, 0x2e, 0xda, 0x2e, 0xa3, 0x8b, 0xfa, 0x8f, 0xb7, 0x8b, 0xd0, 0x27, 0x01, 0xb6, 0x5c, - 0xcf, 0x0d, 0x77, 0x18, 0xf7, 0x81, 0x23, 0x73, 0x57, 0xc2, 0xd9, 0xaa, 0xe2, 0x82, 0x35, 0x8e, - 0xe8, 0x45, 0x18, 0x56, 0x0b, 0xb0, 0xbc, 0xcc, 0x6c, 0xc1, 0x9a, 0x6f, 0x4d, 0xbc, 0x1b, 0x2d, - 0x63, 0x9d, 0xce, 0xfe, 0x74, 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, - 0x3a, 0xf7, 0xaf, 0xfd, 0x8d, 0x22, 0x8c, 0x1b, 0x95, 0xb5, 0xc3, 0x1e, 0xf6, 0xac, 0x2b, 0x74, - 0x03, 0x77, 0x22, 0x22, 0xd6, 0x9f, 0xdd, 0x7d, 0xa9, 0xe8, 0x9b, 0x3c, 0x5d, 0x01, 0xbc, 0x3c, - 0xfa, 0x24, 0x94, 0x1a, 0x4e, 0xc8, 0x34, 0x67, 0x44, 0xac, 0xbb, 0x5e, 0x98, 0xc5, 0x17, 0x13, - 0x27, 0x8c, 0xb4, 0x53, 0x93, 0xf3, 0x8e, 0x59, 0xd2, 0x93, 0x86, 0xca, 0x27, 0xd2, 0xbf, 0x4e, - 0x35, 0x82, 
0x0a, 0x31, 0xfb, 0x98, 0xe3, 0xd0, 0x4b, 0x30, 0x12, 0x10, 0x36, 0x2b, 0x96, 0xa8, - 0x34, 0xc7, 0xa6, 0x59, 0x7f, 0x2c, 0xf6, 0x61, 0x0d, 0x87, 0x0d, 0xca, 0xf8, 0x6e, 0x30, 0xd0, - 0xe1, 0x6e, 0xf0, 0x14, 0x0c, 0xb2, 0x1f, 0x6a, 0x06, 0xa8, 0xd1, 0x28, 0x73, 0x30, 0x96, 0xf8, - 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, 0xd0, 0xdb, 0x87, 0x98, 0xd4, 0xcc, 0x0e, 0x3f, 0xc4, 0x77, - 0x39, 0x31, 0xe5, 0xb1, 0xc4, 0xd9, 0xef, 0x85, 0xb1, 0x65, 0x87, 0x34, 0x7d, 0x6f, 0xc5, 0xab, - 0xb7, 0x7c, 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0xb1, 0x43, 0x84, 0x6f, 0x01, 0x7d, 0xb4, 0x22, 0xcc, - 0x20, 0xf6, 0x36, 0x9c, 0x5e, 0xf6, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0xbe, 0x50, 0x29, 0x6b, 0xf7, - 0xeb, 0x75, 0x79, 0xbf, 0xe3, 0x6e, 0x6d, 0x99, 0x5b, 0xaf, 0x56, 0x92, 0x8b, 0xb5, 0xab, 0x6e, - 0x83, 0xe4, 0x68, 0x41, 0xfe, 0x4a, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x76, 0x38, 0x2b, 0xd7, 0x0e, - 0xf7, 0x3a, 0x0c, 0x6d, 0xb9, 0xa4, 0x51, 0xc7, 0x64, 0x4b, 0xcc, 0xc4, 0x27, 0xf3, 0x3d, 0x75, - 0x56, 0x29, 0xa5, 0xd4, 0x7a, 0xf1, 0xdb, 0xe1, 0xaa, 0x28, 0x8c, 0x15, 0x1b, 0xb4, 0x0b, 0x13, - 0xf2, 0xc2, 0x20, 0xb1, 0x62, 0x5e, 0x3e, 0xd5, 0xe9, 0x16, 0x62, 0x32, 0x3f, 0x75, 0xef, 0x60, - 0x76, 0x02, 0x27, 0xd8, 0xe0, 0x14, 0x63, 0x7a, 0x1d, 0x6c, 0xd2, 0x1d, 0xb8, 0x8f, 0x75, 0x3f, - 0xbb, 0x0e, 0xb2, 0x9b, 0x2d, 0x83, 0xda, 0x3f, 0x66, 0xc1, 0x43, 0xa9, 0x9e, 0x11, 0x37, 0xfc, - 0x63, 0x1e, 0x85, 0xe4, 0x8d, 0xbb, 0xd0, 0xfd, 0xc6, 0x6d, 0xff, 0x2d, 0x0b, 0x4e, 0xad, 0x34, - 0x5b, 0xd1, 0xfe, 0xb2, 0x6b, 0x1a, 0xcd, 0x3e, 0x00, 0x03, 0x4d, 0x52, 0x77, 0xdb, 0x4d, 0x31, - 0x72, 0xb3, 0x72, 0x97, 0x5a, 0x63, 0xd0, 0xc3, 0x83, 0xd9, 0xd1, 0x6a, 0xe4, 0x07, 0xce, 0x36, - 0xe1, 0x00, 0x2c, 0xc8, 0xd9, 0x5e, 0xef, 0xbe, 0x45, 0xae, 0xbb, 0x4d, 0x57, 0x7a, 0x5e, 0x75, - 0xd4, 0xd9, 0xcd, 0xc9, 0x0e, 0x9d, 0x7b, 0xbd, 0xed, 0x78, 0x91, 0x1b, 0xed, 0x0b, 0x7b, 0x97, - 0x64, 0x82, 0x63, 0x7e, 0xf6, 0xd7, 0x2d, 0x18, 0x97, 0xf3, 0x7e, 0xa1, 0x5e, 0x0f, 0x48, 0x18, - 0xa2, 0x19, 0x28, 0xb8, 0x2d, 0xd1, 0x4a, 0x10, 0xad, 0x2c, 0x94, 0x2b, 0xb8, 0xe0, 0xb6, 0xa4, - 0x58, 0xc6, 0x36, 0xc2, 0xa2, 0x69, 0xfa, 0xbb, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0x4b, 0x30, 0xe4, - 0xf9, 0x75, 0x6e, 0xe7, 0xe2, 0x47, 0x1a, 0x9b, 0x60, 0xeb, 0x02, 0x86, 0x15, 0x16, 0x55, 0xa0, - 0xc4, 0x1d, 0xc3, 0xe2, 0x49, 0xdb, 0x93, 0x7b, 0x19, 0xfb, 0xb2, 0x0d, 0x59, 0x12, 0xc7, 0x4c, - 0xec, 0x5f, 0xb1, 0x60, 0x44, 0x7e, 0x59, 0x8f, 0x32, 0x27, 0x5d, 0x5a, 0xb1, 0xbc, 0x19, 0x2f, - 0x2d, 0x2a, 0x33, 0x32, 0x8c, 0x21, 0x2a, 0x16, 0x8f, 0x24, 0x2a, 0x5e, 0x86, 0x61, 0xa7, 0xd5, - 0xaa, 0x98, 0x72, 0x26, 0x9b, 0x4a, 0x0b, 0x31, 0x18, 0xeb, 0x34, 0xf6, 0x8f, 0x16, 0x60, 0x4c, - 0x7e, 0x41, 0xb5, 0xbd, 0x19, 0x92, 0x08, 0x6d, 0x40, 0xc9, 0xe1, 0xa3, 0x44, 0xe4, 0x24, 0x7f, - 0x2c, 0x5b, 0x8f, 0x60, 0x0c, 0x69, 0x7c, 0xe0, 0x2f, 0xc8, 0xd2, 0x38, 0x66, 0x84, 0x1a, 0x30, - 0xe9, 0xf9, 0x11, 0xdb, 0xfc, 0x15, 0xbe, 0x93, 0x69, 0x27, 0xc9, 0xfd, 0xac, 0xe0, 0x3e, 0xb9, - 0x9e, 0xe4, 0x82, 0xd3, 0x8c, 0xd1, 0x8a, 0xd4, 0xcd, 0x14, 0xf3, 0x95, 0x01, 0xfa, 0xc0, 0x65, - 0xab, 0x66, 0xec, 0x5f, 0xb2, 0xa0, 0x24, 0xc9, 0x4e, 0xc2, 0x8a, 0xb7, 0x06, 0x83, 0x21, 0x1b, - 0x04, 0xd9, 0x35, 0x76, 0xa7, 0x86, 0xf3, 0xf1, 0x8a, 0xcf, 0x34, 0xfe, 0x3f, 0xc4, 0x92, 0x07, - 0x53, 0xcd, 0xab, 0xe6, 0xbf, 0x43, 0x54, 0xf3, 0xaa, 0x3d, 0x39, 0x87, 0xd2, 0x1f, 0xb0, 0x36, - 0x6b, 0xba, 0x2e, 0x2a, 0x7a, 0xb5, 0x02, 0xb2, 0xe5, 0xde, 0x4d, 0x8a, 0x5e, 0x15, 0x06, 0xc5, - 0x02, 0x8b, 0xde, 0x80, 0x91, 0x9a, 0xd4, 0xc9, 0xc6, 0x2b, 0xfc, 0x62, 0x47, 0xfb, 0x80, 0x32, - 0x25, 0x71, 0x5d, 0xc8, 0x92, 0x56, 
0x1e, 0x1b, 0xdc, 0x4c, 0xc7, 0x87, 0x62, 0x37, 0xc7, 0x87, - 0x98, 0x6f, 0xbe, 0x1b, 0xc0, 0x8f, 0x5b, 0x30, 0xc0, 0x75, 0x71, 0xbd, 0xa9, 0x42, 0x35, 0xcb, - 0x5a, 0xdc, 0x77, 0xb7, 0x28, 0x50, 0x58, 0xca, 0xd0, 0x1a, 0x94, 0xd8, 0x0f, 0xa6, 0x4b, 0x2c, - 0xe6, 0xbf, 0x4b, 0xe0, 0xb5, 0xea, 0x0d, 0xbc, 0x25, 0x8b, 0xe1, 0x98, 0x83, 0xfd, 0x23, 0x45, - 0xba, 0xbb, 0xc5, 0xa4, 0xc6, 0xa1, 0x6f, 0x3d, 0xb8, 0x43, 0xbf, 0xf0, 0xa0, 0x0e, 0xfd, 0x6d, - 0x18, 0xaf, 0x69, 0x76, 0xb8, 0x78, 0x24, 0x2f, 0x75, 0x9c, 0x24, 0x9a, 0xc9, 0x8e, 0x6b, 0x59, - 0x96, 0x4c, 0x26, 0x38, 0xc9, 0x15, 0x7d, 0x1c, 0x46, 0xf8, 0x38, 0x8b, 0x5a, 0xb8, 0xef, 0xc8, - 0x13, 0xf9, 0xf3, 0x45, 0xaf, 0x82, 0x6b, 0xe5, 0xb4, 0xe2, 0xd8, 0x60, 0x66, 0xff, 0xb1, 0x05, - 0x68, 0xa5, 0xb5, 0x43, 0x9a, 0x24, 0x70, 0x1a, 0xb1, 0x3a, 0xfd, 0x07, 0x2d, 0x98, 0x26, 0x29, - 0xf0, 0x92, 0xdf, 0x6c, 0x8a, 0x4b, 0x4b, 0xce, 0xbd, 0x7a, 0x25, 0xa7, 0x8c, 0x7a, 0xb8, 0x31, - 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, 0xfc, 0x50, 0x1e, - 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x3d, 0x23, 0x90, 0xdb, 0x8a, 0x77, - 0xed, 0x08, 0xef, 0xda, 0x11, 0xde, 0xb5, 0x23, 0xbc, 0x6b, 0x47, 0x78, 0xd7, 0x8e, 0xf0, 0x6d, - 0x6f, 0x47, 0xf8, 0x4b, 0x16, 0x9c, 0x56, 0xc7, 0x80, 0x71, 0xf1, 0xfd, 0x0c, 0x4c, 0xf1, 0xe5, - 0x66, 0xf8, 0x2e, 0x8a, 0x63, 0xef, 0x72, 0xe6, 0xcc, 0x4d, 0xf8, 0xd8, 0x1a, 0x05, 0xf9, 0x63, - 0x85, 0x0c, 0x04, 0xce, 0xaa, 0xc6, 0xfe, 0xf9, 0x21, 0xe8, 0x5f, 0xd9, 0x23, 0x5e, 0x74, 0x02, - 0x57, 0x84, 0x1a, 0x8c, 0xb9, 0xde, 0x9e, 0xdf, 0xd8, 0x23, 0x75, 0x8e, 0x3f, 0xca, 0x4d, 0xf6, - 0x8c, 0x60, 0x3d, 0x56, 0x36, 0x58, 0xe0, 0x04, 0xcb, 0x07, 0xa1, 0x4d, 0xbe, 0x02, 0x03, 0x7c, - 0x13, 0x17, 0xaa, 0xe4, 0xcc, 0x3d, 0x9b, 0x75, 0xa2, 0x38, 0x9a, 0x62, 0x4d, 0x37, 0x3f, 0x24, - 0x44, 0x71, 0xf4, 0x69, 0x18, 0xdb, 0x72, 0x83, 0x30, 0xda, 0x70, 0x9b, 0x24, 0x8c, 0x9c, 0x66, - 0xeb, 0x3e, 0xb4, 0xc7, 0xaa, 0x1f, 0x56, 0x0d, 0x4e, 0x38, 0xc1, 0x19, 0x6d, 0xc3, 0x68, 0xc3, - 0xd1, 0xab, 0x1a, 0x3c, 0x72, 0x55, 0xea, 0x74, 0xb8, 0xae, 0x33, 0xc2, 0x26, 0x5f, 0xba, 0x9c, - 0x6a, 0x4c, 0x01, 0x3a, 0xc4, 0xd4, 0x02, 0x6a, 0x39, 0x71, 0xcd, 0x27, 0xc7, 0x51, 0x41, 0x87, - 0x39, 0xc8, 0x96, 0x4c, 0x41, 0x47, 0x73, 0x83, 0xfd, 0x14, 0x94, 0x08, 0xed, 0x42, 0xca, 0x58, - 0x1c, 0x30, 0xf3, 0xbd, 0xb5, 0x75, 0xcd, 0xad, 0x05, 0xbe, 0xa9, 0xb7, 0x5f, 0x91, 0x9c, 0x70, - 0xcc, 0x14, 0x2d, 0xc1, 0x40, 0x48, 0x02, 0x97, 0x84, 0xe2, 0xa8, 0xe9, 0x30, 0x8c, 0x8c, 0x8c, - 0xbf, 0x86, 0xe1, 0xbf, 0xb1, 0x28, 0x4a, 0xa7, 0x97, 0xc3, 0x54, 0x9a, 0xec, 0x30, 0xd0, 0xa6, - 0xd7, 0x02, 0x83, 0x62, 0x81, 0x45, 0xaf, 0xc1, 0x60, 0x40, 0x1a, 0xcc, 0x30, 0x34, 0xda, 0xfb, - 0x24, 0xe7, 0x76, 0x26, 0x5e, 0x0e, 0x4b, 0x06, 0xe8, 0x1a, 0xa0, 0x80, 0x50, 0x41, 0xc9, 0xf5, - 0xb6, 0x95, 0xdb, 0xa8, 0xd8, 0x68, 0x95, 0x40, 0x8a, 0x63, 0x0a, 0xf9, 0x10, 0x0a, 0x67, 0x14, - 0x43, 0x57, 0x60, 0x52, 0x41, 0xcb, 0x5e, 0x18, 0x39, 0x74, 0x83, 0x1b, 0x67, 0xbc, 0x94, 0x9e, - 0x02, 0x27, 0x09, 0x70, 0xba, 0x8c, 0xfd, 0x25, 0x0b, 0x78, 0x3f, 0x9f, 0xc0, 0xed, 0xfc, 0x55, - 0xf3, 0x76, 0x7e, 0x36, 0x77, 0xe4, 0x72, 0x6e, 0xe6, 0x5f, 0xb2, 0x60, 0x58, 0x1b, 0xd9, 0x78, - 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0xb1, 0x19, 0x92, 0x60, 0x8f, 0xd4, - 0xd9, 0xc4, 0x2c, 0xdc, 0xdf, 0xc4, 0x54, 0x2e, 0x6a, 0xd7, 0x13, 0x0c, 0x71, 0xaa, 0x0a, 0xfb, - 0x53, 0xb2, 0xa9, 0xca, 0xa3, 0xaf, 0xa6, 0xc6, 0x3c, 0xe1, 0xd1, 0xa7, 0x46, 0x15, 0xc7, 0x34, - 0x74, 0xa9, 0xed, 0xf8, 0x61, 0x94, 0xf4, 0xe8, 0xbb, 0xea, 
0x87, 0x11, 0x66, 0x18, 0xfb, 0x79, - 0x80, 0x95, 0xbb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0x79, 0xb0, 0xf2, 0x2f, 0x0f, 0xf6, 0x6f, 0x5a, - 0x30, 0xb6, 0xba, 0x64, 0x9c, 0x5c, 0x73, 0x00, 0xfc, 0xc6, 0x73, 0xfb, 0xf6, 0xba, 0x34, 0x87, - 0x73, 0x8b, 0xa6, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x85, 0x62, 0xa3, 0xed, 0x09, 0xf5, 0xe1, 0x20, - 0x3d, 0x1e, 0xaf, 0xb7, 0x3d, 0x4c, 0x61, 0xda, 0x23, 0x88, 0x62, 0xcf, 0x8f, 0x20, 0xba, 0x06, - 0x3f, 0x40, 0xb3, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7, 0x4f, 0x4c, 0x85, 0xa9, 0xfe, 0xf6, 0xed, - 0xf2, 0x72, 0x88, 0x39, 0xdc, 0xfe, 0x42, 0x11, 0x66, 0x56, 0x1b, 0xe4, 0xee, 0xdb, 0x7c, 0x66, - 0xdb, 0xeb, 0x13, 0x8e, 0xa3, 0x29, 0x62, 0x8e, 0xfa, 0x4c, 0xa7, 0x7b, 0x7f, 0x6c, 0xc1, 0x20, - 0x77, 0x68, 0x93, 0x8f, 0x6e, 0x5f, 0xc9, 0xaa, 0x3d, 0xbf, 0x43, 0xe6, 0xb8, 0x63, 0x9c, 0x78, - 0xc3, 0xa7, 0x0e, 0x4c, 0x01, 0xc5, 0x92, 0xf9, 0xcc, 0xcb, 0x30, 0xa2, 0x53, 0x1e, 0xe9, 0xc1, - 0xdc, 0x9f, 0x2b, 0xc2, 0x04, 0x6d, 0xc1, 0x03, 0x1d, 0x88, 0x9b, 0xe9, 0x81, 0x38, 0xee, 0x47, - 0x53, 0xdd, 0x47, 0xe3, 0x8d, 0xe4, 0x68, 0x5c, 0xce, 0x1b, 0x8d, 0x93, 0x1e, 0x83, 0xef, 0xb6, - 0x60, 0x6a, 0xb5, 0xe1, 0xd7, 0x76, 0x13, 0x0f, 0x9b, 0x5e, 0x84, 0x61, 0xba, 0x1d, 0x87, 0xc6, - 0x1b, 0x7f, 0x23, 0xea, 0x83, 0x40, 0x61, 0x9d, 0x4e, 0x2b, 0x76, 0xf3, 0x66, 0x79, 0x39, 0x2b, - 0x58, 0x84, 0x40, 0x61, 0x9d, 0xce, 0xfe, 0x75, 0x0b, 0xce, 0x5d, 0x59, 0x5a, 0x89, 0xa7, 0x62, - 0x2a, 0x5e, 0xc5, 0x45, 0x18, 0x68, 0xd5, 0xb5, 0xa6, 0xc4, 0xea, 0xd5, 0x65, 0xd6, 0x0a, 0x81, - 0x7d, 0xa7, 0xc4, 0x62, 0xb9, 0x09, 0x70, 0x05, 0x57, 0x96, 0xc4, 0xbe, 0x2b, 0xad, 0x29, 0x56, - 0xae, 0x35, 0xe5, 0x09, 0x18, 0xa4, 0xe7, 0x82, 0x5b, 0x93, 0xed, 0xe6, 0x06, 0x5a, 0x0e, 0xc2, - 0x12, 0x67, 0xff, 0x8c, 0x05, 0x53, 0x57, 0xdc, 0x88, 0x1e, 0xda, 0xc9, 0x80, 0x0c, 0xf4, 0xd4, - 0x0e, 0xdd, 0xc8, 0x0f, 0xf6, 0x93, 0x01, 0x19, 0xb0, 0xc2, 0x60, 0x8d, 0x8a, 0x7f, 0xd0, 0x9e, - 0xcb, 0x3c, 0xb4, 0x0b, 0xa6, 0xfd, 0x0a, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf, 0xea, 0x6e, 0xc0, - 0x54, 0x7f, 0xfb, 0x62, 0xe3, 0x56, 0xfd, 0xb5, 0x2c, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0xd0, 0x82, - 0xd9, 0x2b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2b, 0xcc, 0xd9, 0x74, 0x9f, 0x87, 0x12, 0x91, 0x8a, - 0x76, 0xf9, 0x94, 0x4c, 0x0a, 0xa2, 0x4a, 0x03, 0xcf, 0xe3, 0x42, 0x28, 0xba, 0x1e, 0x5e, 0x5f, - 0x1e, 0xed, 0xf9, 0xdc, 0x2a, 0x20, 0xa2, 0xd7, 0xa5, 0x07, 0xca, 0x60, 0x2f, 0xee, 0x57, 0x52, - 0x58, 0x9c, 0x51, 0xc2, 0xfe, 0x31, 0x0b, 0x4e, 0xab, 0x0f, 0x7e, 0xc7, 0x7d, 0xa6, 0xfd, 0xd5, - 0x02, 0x8c, 0x5e, 0xdd, 0xd8, 0xa8, 0x5c, 0x21, 0x91, 0x36, 0x2b, 0x3b, 0x9b, 0xcf, 0xb1, 0x66, - 0x05, 0xec, 0x74, 0x47, 0x6c, 0x47, 0x6e, 0x63, 0x8e, 0xc7, 0x5b, 0x9a, 0x2b, 0x7b, 0xd1, 0x8d, - 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x9d, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93, 0x59, 0xd0, 0xf3, - 0x30, 0xc0, 0x02, 0x3e, 0xc9, 0x41, 0x78, 0x58, 0x5d, 0xb1, 0x18, 0xf4, 0xf0, 0x60, 0xb6, 0x74, - 0x13, 0x97, 0xf9, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xde, 0x89, 0xa2, 0xd6, 0x55, 0xe2, 0xd4, - 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f, 0x4c, 0x31, 0x2c, - 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc9, 0x00, 0x62, 0x6f, 0x40, 0x89, 0x7e, 0xee, - 0x42, 0xc3, 0x75, 0x3a, 0x9b, 0x98, 0x9f, 0x86, 0x92, 0x34, 0x20, 0x87, 0xe2, 0x75, 0x38, 0x3b, - 0x91, 0xa4, 0x7d, 0x39, 0xc4, 0x31, 0xde, 0xde, 0x82, 0x53, 0xcc, 0x1d, 0xd0, 0x89, 0x76, 0x8c, - 0xd9, 0xd7, 0x7d, 0x98, 0x9f, 0x11, 0x37, 0x36, 0xde, 0xe6, 0x69, 0xed, 0x39, 0xe3, 0x88, 0xe4, - 0x18, 0xdf, 0xde, 0xec, 0x6f, 0xf4, 0xc1, 0xc3, 0xe5, 0x6a, 0x7e, 0xc0, 0x92, 0x97, 
0x60, 0x84, - 0x0b, 0x82, 0x74, 0xd0, 0x9d, 0x86, 0xa8, 0x57, 0xe9, 0x36, 0x37, 0x34, 0x1c, 0x36, 0x28, 0xd1, - 0x39, 0x28, 0xba, 0x6f, 0x7a, 0xc9, 0xc7, 0x3e, 0xe5, 0xd7, 0xd7, 0x31, 0x85, 0x53, 0x34, 0x95, - 0x29, 0xf9, 0x66, 0xad, 0xd0, 0x4a, 0xae, 0x7c, 0x15, 0xc6, 0xdc, 0xb0, 0x16, 0xba, 0x65, 0x8f, - 0xae, 0x40, 0x6d, 0x0d, 0x2b, 0x6d, 0x02, 0x6d, 0xb4, 0xc2, 0xe2, 0x04, 0xb5, 0x76, 0x72, 0xf4, - 0xf7, 0x2c, 0x97, 0x76, 0x7d, 0x2e, 0x4d, 0x37, 0xf6, 0x16, 0xfb, 0xba, 0x90, 0x29, 0xa9, 0xc5, - 0xc6, 0xce, 0x3f, 0x38, 0xc4, 0x12, 0x47, 0xaf, 0x6a, 0xb5, 0x1d, 0xa7, 0xb5, 0xd0, 0x8e, 0x76, - 0x96, 0xdd, 0xb0, 0xe6, 0xef, 0x91, 0x60, 0x9f, 0xdd, 0xb2, 0x87, 0xe2, 0xab, 0x9a, 0x42, 0x2c, - 0x5d, 0x5d, 0xa8, 0x50, 0x4a, 0x9c, 0x2e, 0x83, 0x16, 0x60, 0x5c, 0x02, 0xab, 0x24, 0x64, 0x9b, - 0xfb, 0x30, 0x63, 0xa3, 0x9e, 0xdf, 0x08, 0xb0, 0x62, 0x92, 0xa4, 0x37, 0x45, 0x57, 0x38, 0x0e, - 0xd1, 0xf5, 0x03, 0x30, 0xea, 0x7a, 0x6e, 0xe4, 0x3a, 0x91, 0xcf, 0x2d, 0x2c, 0xfc, 0x42, 0xcd, - 0x54, 0xc7, 0x65, 0x1d, 0x81, 0x4d, 0x3a, 0xfb, 0x3f, 0xf5, 0xc1, 0x24, 0x1b, 0xb6, 0x77, 0x67, - 0xd8, 0xb7, 0xd3, 0x0c, 0xbb, 0x99, 0x9e, 0x61, 0xc7, 0x21, 0x93, 0xdf, 0xf7, 0x34, 0xfb, 0x34, - 0x94, 0xd4, 0x8b, 0x23, 0xf9, 0xe4, 0xd0, 0xca, 0x79, 0x72, 0xd8, 0xfd, 0x5c, 0x96, 0x4e, 0x5b, - 0xc5, 0x4c, 0xa7, 0xad, 0x2f, 0x5b, 0x10, 0x9b, 0x0c, 0xd0, 0xeb, 0x50, 0x6a, 0xf9, 0xcc, 0x17, - 0x31, 0x90, 0x0e, 0xbe, 0x8f, 0x77, 0xb4, 0x39, 0xf0, 0x98, 0x4d, 0x01, 0xef, 0x85, 0x8a, 0x2c, - 0x8a, 0x63, 0x2e, 0xe8, 0x1a, 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x40, 0x91, 0xde, 0x19, 0xf2, - 0x59, 0xc3, 0x0b, 0x62, 0xc9, 0xc1, 0xfe, 0xcf, 0x16, 0x4c, 0x24, 0x49, 0xd1, 0x07, 0xa1, 0x8f, - 0xdc, 0x25, 0x35, 0xd1, 0xde, 0xcc, 0x43, 0x36, 0x56, 0x3a, 0xf0, 0x0e, 0xa0, 0xff, 0x31, 0x2b, - 0x85, 0xae, 0xc2, 0x20, 0x3d, 0x61, 0xaf, 0xa8, 0xe0, 0x59, 0x8f, 0xe6, 0x9d, 0xd2, 0x4a, 0x54, - 0xe1, 0x8d, 0x13, 0x20, 0x2c, 0x8b, 0x33, 0x4f, 0xa9, 0x5a, 0xab, 0x4a, 0x2f, 0x2f, 0x51, 0xa7, - 0x3b, 0xf6, 0xc6, 0x52, 0x85, 0x13, 0x09, 0x6e, 0xdc, 0x53, 0x4a, 0x02, 0x71, 0xcc, 0xc4, 0xfe, - 0x59, 0x0b, 0x80, 0x3b, 0x86, 0x39, 0xde, 0x36, 0x39, 0x01, 0x3d, 0xf9, 0x32, 0xf4, 0x85, 0x2d, - 0x52, 0xeb, 0xe4, 0x26, 0x1b, 0xb7, 0xa7, 0xda, 0x22, 0xb5, 0x78, 0xc6, 0xd1, 0x7f, 0x98, 0x95, - 0xb6, 0xbf, 0x17, 0x60, 0x2c, 0x26, 0x2b, 0x47, 0xa4, 0x89, 0x9e, 0x35, 0xc2, 0x14, 0x9c, 0x4d, - 0x84, 0x29, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0xbb, 0x42, 0xe7, 0xf6, - 0x74, 0xe7, 0x66, 0x50, 0xfe, 0x73, 0x6b, 0xce, 0x5d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae, 0x90, 0x35, - 0xe7, 0xee, 0x21, 0x77, 0x86, 0x65, 0xbb, 0xf4, 0x75, 0x37, 0x8c, 0x3e, 0xf7, 0x1f, 0xe3, 0xff, - 0x6c, 0xdd, 0xd1, 0x4a, 0x58, 0x5d, 0xae, 0x27, 0x7c, 0x9e, 0x7a, 0xaa, 0xcb, 0xf5, 0x92, 0x75, - 0xb9, 0x5e, 0x0f, 0x75, 0xb9, 0x1e, 0x7a, 0x0b, 0x06, 0x85, 0x4b, 0xa2, 0x08, 0x64, 0x34, 0xdf, - 0x43, 0x7d, 0xc2, 0xa3, 0x91, 0xd7, 0x39, 0x2f, 0xaf, 0xdd, 0x02, 0xda, 0xb5, 0x5e, 0x59, 0x21, - 0xfa, 0xcb, 0x16, 0x8c, 0x89, 0xdf, 0x98, 0xbc, 0xd9, 0x26, 0x61, 0x24, 0xc4, 0xd2, 0xf7, 0xf7, - 0xde, 0x06, 0x51, 0x90, 0x37, 0xe5, 0xfd, 0xf2, 0x9c, 0x31, 0x91, 0x5d, 0x5b, 0x94, 0x68, 0x05, - 0xfa, 0x3b, 0x16, 0x9c, 0x6a, 0x3a, 0x77, 0x79, 0x8d, 0x1c, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x69, - 0xff, 0x83, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x3f, 0x9e, 0xca, 0x22, 0xe9, 0xda, - 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc1, 0x90, 0x9c, 0x6f, 0x19, 0xca, 0x8d, 0x65, 0x5d, 0xe6, 0x3e, - 0xb2, 0x47, 0xa8, 0xfe, 0xfc, 0x9f, 0xd6, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, - 0xcf, 0xb1, 
-	// (previous gzipped FileDescriptorProto byte array elided)
+	// 14685 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped FileDescriptorProto byte array elided)
0x88, 0x13, 0x0a, 0x1f, 0x31, 0x6d, + 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0xab, 0x04, 0xfb, 0xba, 0xa8, 0x04, 0x8d, 0x2e, 0xea, 0x3f, + 0xda, 0x2e, 0x42, 0x9f, 0x04, 0xd8, 0x74, 0x3d, 0x37, 0xdc, 0x66, 0xdc, 0x07, 0x0e, 0xcd, 0x5d, + 0x49, 0xb0, 0xcb, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x17, 0x61, 0x58, 0x6d, 0x20, 0x2b, 0x8b, 0xcc, + 0x60, 0xae, 0x39, 0x20, 0xc5, 0xbb, 0xe9, 0x22, 0xd6, 0xe9, 0xec, 0x4f, 0x27, 0xe7, 0x8b, 0x58, + 0x01, 0x5a, 0xff, 0x5a, 0xbd, 0xf6, 0x6f, 0xa1, 0x73, 0xff, 0xda, 0xdf, 0x18, 0x80, 0x71, 0xa3, + 0xb2, 0x76, 0xd8, 0xc3, 0x9e, 0x7b, 0x99, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f, + 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x84, 0x52, 0xc3, 0x09, 0x99, 0x7a, 0x91, + 0x88, 0x75, 0xd7, 0x0b, 0xb3, 0xf8, 0xf6, 0xe6, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, 0x4b, + 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0x3a, 0x21, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, 0x7a, + 0x89, 0x6d, 0xad, 0x74, 0x56, 0x2c, 0x50, 0x69, 0x94, 0x4d, 0xb3, 0x7e, 0x43, 0x22, 0x56, 0x38, + 0x6c, 0x50, 0xc6, 0x17, 0xa8, 0x81, 0x0e, 0x17, 0xa8, 0xa7, 0x60, 0x90, 0xfd, 0x50, 0x33, 0x40, + 0x8d, 0xc6, 0x0a, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1, 0xde, 0x26, 0x0c, 0xbd, 0xa2, 0x89, + 0x49, 0xcd, 0x9c, 0x15, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20, + 0xa7, 0x41, 0xaf, 0xb6, 0x14, 0xac, 0x6e, 0x22, 0xc0, 0x44, 0xed, 0x57, 0xba, 0x76, 0x7b, 0x3b, + 0x9c, 0x9d, 0x4b, 0x95, 0xe6, 0x6a, 0xcd, 0x97, 0x45, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, 0xae, + 0xb9, 0x61, 0xf4, 0xb9, 0xff, 0x98, 0x38, 0x9c, 0x32, 0x9a, 0x84, 0x6e, 0xe8, 0x37, 0xa5, 0xe1, + 0x43, 0xde, 0x94, 0x46, 0xf3, 0x6e, 0x49, 0x33, 0x6d, 0x78, 0x28, 0xe7, 0x0b, 0x32, 0x94, 0xa5, + 0x8b, 0xba, 0xb2, 0xb4, 0x8b, 0x8a, 0x6d, 0x56, 0xd6, 0x31, 0xfb, 0x7a, 0xdb, 0xf1, 0x22, 0x37, + 0xda, 0xd3, 0x95, 0xab, 0xef, 0x85, 0xb1, 0x45, 0x87, 0x34, 0x7d, 0x6f, 0xc9, 0xab, 0xb7, 0x7c, + 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0x31, 0xe1, 0x83, 0x6f, 0xbd, 0x7d, 0xb4, 0xf7, 0x30, 0x83, 0xd8, + 0x5b, 0x70, 0x72, 0xd1, 0xbf, 0xed, 0xdd, 0x76, 0x82, 0xfa, 0x5c, 0x65, 0x45, 0x53, 0xfe, 0xac, + 0x49, 0xe5, 0x83, 0x95, 0x7f, 0xb5, 0xd3, 0x4a, 0xf2, 0xeb, 0xd0, 0xb2, 0xdb, 0x20, 0x39, 0x2a, + 0xba, 0xbf, 0x56, 0x30, 0x6a, 0x8a, 0xe9, 0x95, 0x91, 0xd8, 0xca, 0x35, 0x12, 0xbf, 0x0e, 0x43, + 0x9b, 0x2e, 0x69, 0xd4, 0x31, 0xd9, 0x14, 0xbd, 0xf3, 0x64, 0xbe, 0x1b, 0xd9, 0x32, 0xa5, 0x94, + 0x2a, 0x59, 0xae, 0xba, 0x58, 0x16, 0x85, 0xb1, 0x62, 0x83, 0x76, 0x60, 0x42, 0xf6, 0xa1, 0xc4, + 0x8a, 0xfd, 0xe0, 0xa9, 0x4e, 0x03, 0x6f, 0x32, 0x3f, 0x71, 0x6f, 0xbf, 0x3c, 0x81, 0x13, 0x6c, + 0x70, 0x8a, 0x31, 0x7a, 0x04, 0xfa, 0x9a, 0xf4, 0xe4, 0xeb, 0x63, 0xdd, 0xcf, 0x74, 0x15, 0x4c, + 0xed, 0xc2, 0xa0, 0xf6, 0x8f, 0x5b, 0xf0, 0x50, 0xaa, 0x67, 0x84, 0xfa, 0xe9, 0x88, 0x47, 0x21, + 0xa9, 0x0e, 0x2a, 0x74, 0x57, 0x07, 0xd9, 0x7f, 0xdb, 0x82, 0x13, 0x4b, 0xcd, 0x56, 0xb4, 0xb7, + 0xe8, 0x9a, 0x16, 0xdd, 0x0f, 0xc0, 0x40, 0x93, 0xd4, 0xdd, 0x76, 0x53, 0x8c, 0x5c, 0x59, 0x9e, + 0x0e, 0xab, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xad, 0x46, 0x7e, 0xe0, 0x6c, 0x11, 0x0e, 0xc0, 0x82, + 0x9c, 0x9d, 0xb1, 0xee, 0x5d, 0x72, 0xcd, 0x6d, 0xba, 0xd1, 0xfd, 0xcd, 0x76, 0x61, 0x8c, 0x95, + 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xe3, 0x72, 0xde, 0xcf, 0xd5, 0xeb, 0x01, 0x09, 0x43, + 0x34, 0x03, 0x05, 0xb7, 0x25, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0x95, 0x0a, 0x2e, 0xb8, 0x2d, 0x29, + 0xce, 0xb3, 0x03, 0xa8, 0x68, 0xda, 0xa5, 0xaf, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x02, 0x0c, 0x79, + 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x4d, 0xc0, 0xb0, 0xc2, 0xa2, 
0x0a, 0x94, + 0xb8, 0xd7, 0x62, 0x3c, 0x69, 0x7b, 0xf2, 0x7d, 0x64, 0x5f, 0xb6, 0x2e, 0x4b, 0xe2, 0x98, 0x89, + 0xfd, 0xeb, 0x16, 0x8c, 0xc8, 0x2f, 0xeb, 0xf1, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, 0x97, + 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x09, 0x86, 0x9d, 0x56, + 0xab, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x8b, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xac, 0x00, 0x63, + 0xf2, 0x0b, 0xaa, 0xed, 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, 0xf9, + 0x63, 0xd9, 0x4a, 0x2e, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x4e, 0x96, 0xc6, 0x31, 0x23, 0xd4, 0x80, + 0x49, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0x9d, 0xec, 0x8e, 0x49, 0xee, 0xa7, 0x05, 0xf7, 0xc9, + 0xb5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x96, 0xa4, 0xe2, 0xb0, 0x98, 0xaf, 0x44, 0xd2, 0x07, 0x2e, + 0x5b, 0x6f, 0x68, 0xff, 0x8a, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x26, 0xe6, 0x55, 0x18, 0x0c, 0xd9, + 0x20, 0xc8, 0xae, 0xb1, 0x3b, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, 0x3c, + 0x98, 0xdd, 0x48, 0x35, 0xff, 0x1d, 0x62, 0x37, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x88, 0xb5, + 0x59, 0x53, 0xc4, 0x52, 0x91, 0xb7, 0x15, 0x90, 0x4d, 0xf7, 0x4e, 0x52, 0xe4, 0xad, 0x30, 0x28, + 0x16, 0x58, 0xf4, 0x26, 0x8c, 0xd4, 0xa4, 0xc1, 0x20, 0x5e, 0xe1, 0xe7, 0x3b, 0x1a, 0xaf, 0x94, + 0x9d, 0x93, 0xeb, 0xd0, 0x16, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x7a, 0xe5, 0x14, 0xbb, 0x79, 0xe5, + 0xc4, 0x7c, 0xf3, 0x7d, 0x54, 0x7e, 0xc2, 0x82, 0x01, 0xae, 0x28, 0xee, 0x4d, 0x4f, 0xaf, 0x99, + 0x7d, 0xe3, 0xbe, 0xbb, 0x49, 0x81, 0x42, 0xd2, 0x40, 0xab, 0x50, 0x62, 0x3f, 0x98, 0xa2, 0xbb, + 0x98, 0xff, 0x68, 0x86, 0xd7, 0xaa, 0x37, 0xf0, 0xa6, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, 0x16, + 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xe0, 0x0e, 0xfd, 0xc2, 0x83, 0x3a, 0xf4, 0xb7, + 0x60, 0xbc, 0xa6, 0x19, 0x89, 0xe3, 0x91, 0xbc, 0xd0, 0x71, 0x92, 0x68, 0xf6, 0x64, 0xae, 0x9d, + 0x5b, 0x30, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x71, 0x18, 0xe1, 0xe3, 0x2c, 0x6a, 0xe1, 0x8e, 0x4d, + 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0xa7, 0x16, + 0xa0, 0xa5, 0xd6, 0x36, 0x69, 0x92, 0xc0, 0x69, 0xc4, 0xb6, 0x9e, 0x1f, 0xb2, 0x60, 0x9a, 0xa4, + 0xc0, 0x0b, 0x7e, 0xb3, 0x29, 0x2e, 0x8b, 0x39, 0xfa, 0x8c, 0xa5, 0x9c, 0x32, 0xea, 0x55, 0xd1, + 0x74, 0x1e, 0x05, 0xce, 0xad, 0x0f, 0xad, 0xc2, 0x14, 0x3f, 0x25, 0x15, 0x42, 0x73, 0x92, 0x7a, + 0x58, 0x30, 0x9e, 0x5a, 0x4f, 0x93, 0xe0, 0xac, 0x72, 0xf6, 0x37, 0x47, 0x20, 0xb7, 0x15, 0xef, + 0x1a, 0xb9, 0xde, 0x35, 0x72, 0xbd, 0x6b, 0xe4, 0x7a, 0xd7, 0xc8, 0xf5, 0xae, 0x91, 0xeb, 0x5d, + 0x23, 0xd7, 0x51, 0x18, 0xb9, 0xfe, 0x8a, 0x05, 0x27, 0xd5, 0x59, 0x63, 0xdc, 0xae, 0x3f, 0x03, + 0x53, 0x7c, 0xb9, 0x19, 0xde, 0xbb, 0xe2, 0x6c, 0xbd, 0x94, 0x39, 0x73, 0x13, 0x5e, 0xe6, 0x46, + 0x41, 0xfe, 0x5c, 0x27, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x71, 0x08, 0xfa, 0x97, 0x76, 0x89, + 0x17, 0x1d, 0xc3, 0x3d, 0xa4, 0x06, 0x63, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, 0xe3, 0x0f, + 0x73, 0x5d, 0x3e, 0x25, 0x58, 0x8f, 0xad, 0x18, 0x2c, 0x70, 0x82, 0xe5, 0x83, 0x30, 0x15, 0x5c, + 0x86, 0x01, 0x7e, 0x52, 0x08, 0x3b, 0x41, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x9c, 0x7f, 0xb1, 0x19, + 0x83, 0x9f, 0x44, 0xa2, 0x38, 0xfa, 0x34, 0x8c, 0x6d, 0xba, 0x41, 0x18, 0xad, 0xbb, 0x4d, 0x12, + 0x46, 0x4e, 0xb3, 0x75, 0x1f, 0xa6, 0x01, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, 0xe0, 0x8c, 0xb6, + 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xba, 0x2a, 0x75, 0x3a, 0x5c, 0xd3, 0x19, 0x61, 0x93, + 0x2f, 0x5d, 0x4e, 0x35, 0xa6, 0xdd, 0x1e, 0x62, 0xba, 0x07, 0xb5, 0x9c, 0xb8, 0x5a, 0x9b, 0xe3, + 0xa8, 0x34, 
0xc5, 0x5c, 0xc4, 0x4b, 0xa6, 0x34, 0xa5, 0x39, 0x82, 0x7f, 0x0a, 0x4a, 0x84, 0x76, + 0x21, 0x65, 0x2c, 0x0e, 0x98, 0x8b, 0xbd, 0xb5, 0x75, 0xd5, 0xad, 0x05, 0xbe, 0x69, 0x94, 0x59, + 0x92, 0x9c, 0x70, 0xcc, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57, 0x29, 0x7e, 0x3b, 0x0c, 0x23, + 0x23, 0xe3, 0xef, 0xc1, 0xf8, 0x6f, 0x2c, 0x8a, 0xd2, 0xe9, 0xe5, 0x30, 0xbd, 0x29, 0x3b, 0x0c, + 0xb4, 0xe9, 0x35, 0xc7, 0xa0, 0x58, 0x60, 0xd1, 0x6b, 0x30, 0x18, 0x90, 0x06, 0xb3, 0xfa, 0x8d, + 0xf6, 0x3e, 0xc9, 0xb9, 0x11, 0x91, 0x97, 0xc3, 0x92, 0x01, 0xba, 0x0a, 0x28, 0x20, 0x54, 0x1a, + 0x73, 0xbd, 0x2d, 0xe5, 0x38, 0x2d, 0x36, 0x5a, 0x25, 0xf5, 0xe2, 0x98, 0x42, 0x3e, 0x05, 0xc4, + 0x19, 0xc5, 0xd0, 0x65, 0x98, 0x54, 0xd0, 0x15, 0x2f, 0x8c, 0x1c, 0xba, 0xc1, 0x8d, 0x33, 0x5e, + 0x4a, 0x19, 0x82, 0x93, 0x04, 0x38, 0x5d, 0xc6, 0xfe, 0x92, 0x05, 0xbc, 0x9f, 0x8f, 0x41, 0x05, + 0xf0, 0xaa, 0xa9, 0x02, 0x38, 0x9d, 0x3b, 0x72, 0x39, 0xd7, 0xff, 0x2f, 0x59, 0x30, 0xac, 0x8d, + 0x6c, 0x3c, 0x67, 0xad, 0x0e, 0x73, 0xb6, 0x0d, 0x13, 0x74, 0xa6, 0x5f, 0xdf, 0x08, 0x49, 0xb0, + 0x4b, 0xea, 0x6c, 0x62, 0x16, 0xee, 0x6f, 0x62, 0x2a, 0x27, 0xcd, 0x6b, 0x09, 0x86, 0x38, 0x55, + 0x85, 0xfd, 0x29, 0xd9, 0x54, 0xe5, 0xd3, 0x5a, 0x53, 0x63, 0x9e, 0xf0, 0x69, 0x55, 0xa3, 0x8a, + 0x63, 0x1a, 0xba, 0xd4, 0xb6, 0xfd, 0x30, 0x4a, 0xfa, 0xb4, 0x5e, 0xf1, 0xc3, 0x08, 0x33, 0x8c, + 0xfd, 0x3c, 0xc0, 0xd2, 0x1d, 0x52, 0xe3, 0x33, 0x56, 0xbf, 0xa1, 0x58, 0xf9, 0x37, 0x14, 0xfb, + 0x77, 0x2c, 0x18, 0x5b, 0x5e, 0x30, 0x4e, 0xae, 0x59, 0x00, 0x7e, 0xad, 0xba, 0x75, 0x6b, 0x4d, + 0xfa, 0x6a, 0x70, 0x73, 0xb5, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x86, 0x62, 0xa3, 0xed, 0x09, 0x1d, + 0xe5, 0x20, 0x3d, 0x1e, 0xaf, 0xb5, 0x3d, 0x4c, 0x61, 0xda, 0x33, 0xa0, 0x62, 0xcf, 0xcf, 0x80, + 0xba, 0x86, 0xff, 0x40, 0x65, 0xe8, 0xbf, 0x7d, 0xdb, 0xad, 0xf3, 0x47, 0xd6, 0xc2, 0x8f, 0xe4, + 0xd6, 0xad, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0x85, 0x22, 0xcc, 0x2c, 0x37, 0xc8, 0x9d, 0xb7, + 0xf9, 0xd0, 0xbc, 0xd7, 0x47, 0x4c, 0x87, 0xd3, 0xf6, 0x1c, 0xf6, 0xa1, 0x5a, 0xf7, 0xfe, 0xd8, + 0x84, 0x41, 0xee, 0xd2, 0x29, 0x9f, 0x9d, 0x67, 0xda, 0xe6, 0xf2, 0x3b, 0x64, 0x96, 0xbb, 0x86, + 0x0a, 0xdb, 0x9c, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, 0x4e, 0x79, 0xa8, + 0x27, 0xa3, 0xdf, 0x53, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, 0xd2, 0x03, 0x71, 0xd4, + 0xcf, 0x06, 0xbb, 0x8f, 0xc6, 0x9b, 0xc9, 0xd1, 0xb8, 0x94, 0x37, 0x1a, 0xc7, 0x3d, 0x06, 0xdf, + 0x6b, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x9e, 0xf6, 0xbd, 0x08, 0xc3, 0x74, 0x3b, 0x0e, + 0x8d, 0x28, 0x17, 0x46, 0xdc, 0x13, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xc6, 0x8d, 0x95, 0xc5, + 0xac, 0x70, 0x29, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xb7, 0x2c, 0x38, 0x73, 0x79, 0x61, 0x29, 0x9e, + 0x8a, 0xa9, 0x88, 0x2d, 0xe7, 0x61, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xeb, 0x70, 0x17, 0x59, 0x2b, + 0x04, 0xf6, 0x9d, 0x12, 0x8d, 0xe8, 0x06, 0xc0, 0x65, 0x5c, 0x59, 0x10, 0xfb, 0xae, 0x34, 0xd9, + 0x58, 0xb9, 0x26, 0x9b, 0x27, 0x60, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, 0x5b, 0xdf, 0x39, + 0x08, 0x4b, 0x9c, 0xfd, 0x73, 0x16, 0x4c, 0x5d, 0x76, 0x23, 0x7a, 0x68, 0x27, 0x43, 0x92, 0xd0, + 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x86, 0x24, 0xc1, 0x0a, 0x83, 0x35, 0x2a, 0xfe, 0x41, + 0xbb, 0x2e, 0x7b, 0xa3, 0x50, 0x30, 0x8d, 0x64, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, 0xfd, 0x55, 0x77, + 0x03, 0xa6, 0x5f, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, 0x8d, 0xfd, 0xc7, + 0x16, 0x94, 0x2f, 0x37, 0xda, 0x61, 0x44, 0x82, 0xcd, 0x30, 0x67, 0xd3, 0x7d, 0x1e, 0x4a, 0x44, + 0x6a, 0xf3, 0xe5, 0x63, 0x4a, 0x29, 
0x88, 0x2a, 0x35, 0x3f, 0x8f, 0x8c, 0xa2, 0xe8, 0x7a, 0x78, + 0x7f, 0x7c, 0xb8, 0x07, 0xa4, 0xcb, 0x80, 0x88, 0x5e, 0x97, 0x1e, 0x2a, 0x86, 0xc5, 0x9c, 0x58, + 0x4a, 0x61, 0x71, 0x46, 0x09, 0xfb, 0xc7, 0x2d, 0x38, 0xa9, 0x3e, 0xf8, 0x1d, 0xf7, 0x99, 0xf6, + 0x57, 0x0b, 0x30, 0x7a, 0x65, 0x7d, 0xbd, 0x72, 0x99, 0x44, 0xda, 0xac, 0xec, 0x6c, 0xa3, 0xc7, + 0x9a, 0xa9, 0xb1, 0xd3, 0x1d, 0xb1, 0x1d, 0xb9, 0x8d, 0x59, 0x1e, 0x71, 0x6c, 0x76, 0xc5, 0x8b, + 0xae, 0x07, 0xd5, 0x28, 0x70, 0xbd, 0xad, 0xcc, 0x99, 0x2e, 0x65, 0x96, 0x62, 0x9e, 0xcc, 0x82, + 0x9e, 0x87, 0x01, 0x16, 0xf2, 0x4c, 0x0e, 0xc2, 0xc3, 0xea, 0x8a, 0xc5, 0xa0, 0x07, 0xfb, 0xe5, + 0xd2, 0x0d, 0xbc, 0xc2, 0xff, 0x60, 0x41, 0x8a, 0x6e, 0xc0, 0xf0, 0x76, 0x14, 0xb5, 0xae, 0x10, + 0xa7, 0x4e, 0x02, 0xb9, 0xcb, 0x9e, 0xcd, 0xda, 0x65, 0x69, 0x27, 0x70, 0xb2, 0x78, 0x63, 0x8a, + 0x61, 0x21, 0xd6, 0xf9, 0xd8, 0x55, 0x80, 0x18, 0x77, 0x44, 0x56, 0x16, 0x7b, 0x1d, 0x4a, 0xf4, + 0x73, 0xe7, 0x1a, 0xae, 0xd3, 0xd9, 0x8e, 0xfd, 0x34, 0x94, 0xa4, 0x95, 0x3a, 0x14, 0xf1, 0x11, + 0xd8, 0x89, 0x24, 0x8d, 0xd8, 0x21, 0x8e, 0xf1, 0xf6, 0x26, 0x9c, 0x60, 0xbe, 0xaa, 0x4e, 0xb4, + 0x6d, 0xcc, 0xbe, 0xee, 0xc3, 0xfc, 0x8c, 0xb8, 0xb1, 0xf1, 0x36, 0x4f, 0x6b, 0x0f, 0x7a, 0x47, + 0x24, 0xc7, 0xf8, 0xf6, 0x66, 0x7f, 0xb3, 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0x87, 0xec, 0x79, 0x09, + 0x46, 0xb8, 0x20, 0x48, 0x07, 0xdd, 0x69, 0x88, 0x7a, 0x95, 0x6e, 0x73, 0x5d, 0xc3, 0x61, 0x83, + 0x12, 0x9d, 0x81, 0xa2, 0xfb, 0x96, 0x97, 0x7c, 0xee, 0xb6, 0xf2, 0xfa, 0x1a, 0xa6, 0x70, 0x8a, + 0xa6, 0x32, 0x25, 0xdf, 0xac, 0x15, 0x5a, 0xc9, 0x95, 0xaf, 0xc2, 0x98, 0x1b, 0xd6, 0x42, 0x77, + 0xc5, 0xa3, 0x2b, 0x50, 0x5b, 0xc3, 0x4a, 0x9b, 0x40, 0x1b, 0xad, 0xb0, 0x38, 0x41, 0xad, 0x9d, + 0x1c, 0xfd, 0x3d, 0xcb, 0xa5, 0x5d, 0x03, 0x06, 0xd0, 0x8d, 0xbd, 0xc5, 0xbe, 0x2e, 0x64, 0x9a, + 0x70, 0xb1, 0xb1, 0xf3, 0x0f, 0x0e, 0xb1, 0xc4, 0xd1, 0xab, 0x5a, 0x6d, 0xdb, 0x69, 0xcd, 0xb5, + 0xa3, 0xed, 0x45, 0x37, 0xac, 0xf9, 0xbb, 0x24, 0xd8, 0x63, 0xb7, 0xec, 0xa1, 0xf8, 0xaa, 0xa6, + 0x10, 0x0b, 0x57, 0xe6, 0x2a, 0x94, 0x12, 0xa7, 0xcb, 0xa0, 0x39, 0x18, 0x97, 0xc0, 0x2a, 0x09, + 0xd9, 0xe6, 0x3e, 0xcc, 0xd8, 0xa8, 0x07, 0x68, 0x02, 0xac, 0x98, 0x24, 0xe9, 0x4d, 0xd1, 0x15, + 0x8e, 0x42, 0x74, 0xfd, 0x00, 0x8c, 0xba, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, 0x73, 0x33, 0x0e, 0xbf, + 0x50, 0x33, 0xd5, 0xf1, 0x8a, 0x8e, 0xc0, 0x26, 0x9d, 0xfd, 0x9f, 0xfa, 0x60, 0x92, 0x0d, 0xdb, + 0xbb, 0x33, 0xec, 0x3b, 0x69, 0x86, 0xdd, 0x48, 0xcf, 0xb0, 0xa3, 0x90, 0xc9, 0xef, 0x7b, 0x9a, + 0x7d, 0x1a, 0x4a, 0xea, 0xcd, 0x9d, 0x7c, 0x74, 0x6b, 0xe5, 0x3c, 0xba, 0xed, 0x7e, 0x2e, 0x4b, + 0xcf, 0xb0, 0x62, 0xa6, 0x67, 0xd8, 0x97, 0x2d, 0x88, 0x4d, 0x06, 0xe8, 0x75, 0x28, 0xb5, 0x7c, + 0xe6, 0x68, 0x1a, 0x48, 0xef, 0xed, 0xc7, 0x3b, 0xda, 0x1c, 0x78, 0xd4, 0xb2, 0x80, 0xf7, 0x42, + 0x45, 0x16, 0xc5, 0x31, 0x17, 0x74, 0x15, 0x06, 0x5b, 0x01, 0xa9, 0x46, 0x2c, 0xa4, 0x4e, 0xef, + 0x0c, 0xf9, 0xac, 0xe1, 0x05, 0xb1, 0xe4, 0x60, 0xff, 0x67, 0x0b, 0x26, 0x92, 0xa4, 0xe8, 0x43, + 0xd0, 0x47, 0xee, 0x90, 0x9a, 0x68, 0x6f, 0xe6, 0x21, 0x1b, 0x2b, 0x1d, 0x78, 0x07, 0xd0, 0xff, + 0x98, 0x95, 0x42, 0x57, 0x60, 0x90, 0x9e, 0xb0, 0x97, 0x55, 0xf8, 0xb8, 0x47, 0xf3, 0x4e, 0x69, + 0x25, 0xaa, 0xf0, 0xc6, 0x09, 0x10, 0x96, 0xc5, 0x99, 0x3b, 0x56, 0xad, 0x55, 0xa5, 0x97, 0x97, + 0xa8, 0xd3, 0x1d, 0x7b, 0x7d, 0xa1, 0xc2, 0x89, 0x04, 0x37, 0xee, 0x8e, 0x25, 0x81, 0x38, 0x66, + 0x62, 0xff, 0xbc, 0x05, 0xc0, 0xbd, 0xcf, 0x1c, 0x6f, 0x8b, 0x1c, 0x83, 0x9e, 0x7c, 0x11, 0xfa, + 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x81, 0x8e, 0xdb, 0x53, 0x6d, 
0x91, 0x5a, 0x3c, 0xe3, 0xe8, 0x3f, + 0xcc, 0x4a, 0xdb, 0xdf, 0x07, 0x30, 0x16, 0x93, 0xad, 0x44, 0xa4, 0x89, 0x9e, 0x35, 0x02, 0x75, + 0x9c, 0x4e, 0x04, 0xea, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0x3b, 0x42, + 0xe7, 0xf6, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0xb3, 0xab, 0xce, 0x1d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae, + 0x90, 0x55, 0xe7, 0x4e, 0x57, 0x3f, 0x5d, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x63, 0x55, 0x4f, + 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x61, 0x50, 0xf8, 0x3d, + 0x8a, 0x50, 0x5e, 0x17, 0x7b, 0xa8, 0x4f, 0xb8, 0x4d, 0xf2, 0x3a, 0x2f, 0xca, 0x6b, 0xb7, 0x80, + 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0xaa, 0x05, 0x63, 0xe2, 0x37, 0x26, 0x6f, 0xb5, 0x49, 0x18, + 0x09, 0xb1, 0xf4, 0xfd, 0xbd, 0xb7, 0x41, 0x14, 0xe4, 0x4d, 0x79, 0xbf, 0x3c, 0x67, 0x4c, 0x64, + 0xd7, 0x16, 0x25, 0x5a, 0x81, 0xfe, 0xae, 0x05, 0x27, 0x9a, 0xce, 0x1d, 0x5e, 0x23, 0x87, 0x61, + 0x27, 0x72, 0x7d, 0xe1, 0x3f, 0xf0, 0xa1, 0xde, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0xda, 0x1f, + 0x4f, 0x64, 0x91, 0x74, 0x6d, 0x6a, 0x66, 0xbb, 0x66, 0x36, 0x61, 0x48, 0xce, 0xb7, 0x07, 0xe9, + 0x64, 0xcd, 0xea, 0x11, 0x73, 0xed, 0x81, 0xd6, 0xf3, 0x69, 0x18, 0xd1, 0xe7, 0xd8, 0x03, 0xad, + 0xeb, 0x2d, 0x98, 0xca, 0x98, 0x4b, 0x0f, 0xb4, 0xca, 0xdb, 0x70, 0x3a, 0x77, 0x7e, 0x3c, 0x50, + 0x27, 0xf9, 0xaf, 0x5a, 0xfa, 0x3e, 0x78, 0x0c, 0xc6, 0x8a, 0x05, 0xd3, 0x58, 0x71, 0xb6, 0xf3, + 0xca, 0xc9, 0xb1, 0x58, 0xbc, 0xa9, 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x35, 0x18, 0x68, 0x50, 0x88, + 0xf4, 0x9e, 0xb5, 0xbb, 0xaf, 0xc8, 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x59, + 0xd0, 0x77, 0x0c, 0x3d, 0x81, 0xcd, 0x9e, 0x78, 0x36, 0x97, 0xb5, 0x88, 0x6a, 0x3e, 0x8b, 0x9d, + 0xdb, 0x4b, 0x77, 0x22, 0xe2, 0x85, 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb6, 0x60, 0xea, 0x9a, + 0xef, 0xd4, 0xe7, 0x9d, 0x86, 0xe3, 0xd5, 0x48, 0xb0, 0xe2, 0x6d, 0x1d, 0xca, 0xf5, 0xbb, 0xd0, + 0xd5, 0xf5, 0x7b, 0x41, 0x7a, 0x4e, 0xf5, 0xe5, 0x8f, 0x1f, 0x95, 0xa4, 0x93, 0xa1, 0x8b, 0x0c, + 0x1f, 0xdf, 0x6d, 0x40, 0x7a, 0x2b, 0xc5, 0x03, 0x28, 0x0c, 0x83, 0x2e, 0x6f, 0xaf, 0x18, 0xc4, + 0x27, 0xb3, 0x25, 0xdc, 0xd4, 0xe7, 0x69, 0x4f, 0x7b, 0x38, 0x00, 0x4b, 0x46, 0xf6, 0x4b, 0x90, + 0x19, 0x6a, 0xa2, 0xbb, 0x5e, 0xc2, 0xfe, 0x18, 0x4c, 0xb2, 0x92, 0x87, 0xd4, 0x0c, 0xd8, 0x09, + 0x6d, 0x6a, 0x46, 0xd8, 0x4c, 0xfb, 0xf3, 0x16, 0x8c, 0xaf, 0x25, 0xa2, 0x09, 0x9e, 0x67, 0xf6, + 0xd7, 0x0c, 0x25, 0x7e, 0x95, 0x41, 0xb1, 0xc0, 0x1e, 0xb9, 0x92, 0xeb, 0xcf, 0x2d, 0x88, 0xa3, + 0xbf, 0x1c, 0x83, 0xf8, 0xb6, 0x60, 0x88, 0x6f, 0x99, 0x82, 0xac, 0x6a, 0x4e, 0x9e, 0xf4, 0x86, + 0xae, 0xaa, 0xb8, 0x68, 0x1d, 0x64, 0xd8, 0x98, 0x0d, 0x9f, 0x8a, 0x63, 0x66, 0xf0, 0x34, 0x19, + 0x29, 0xcd, 0xfe, 0xdd, 0x02, 0x20, 0x45, 0xdb, 0x73, 0xdc, 0xb6, 0x74, 0x89, 0xa3, 0x89, 0xdb, + 0xb6, 0x0b, 0x88, 0x79, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0x50, 0xeb, 0x1d, 0xce, 0x3d, + 0x61, 0x46, 0xbe, 0x0d, 0xbb, 0x96, 0xe2, 0x86, 0x33, 0x6a, 0xd0, 0x3c, 0x43, 0xfa, 0x7b, 0xf5, + 0x0c, 0x19, 0xe8, 0xf2, 0xc8, 0xf1, 0x2b, 0x16, 0x8c, 0xaa, 0x6e, 0x7a, 0x87, 0xb8, 0xc2, 0xab, + 0xf6, 0xe4, 0x6c, 0xa0, 0x15, 0xad, 0xc9, 0xec, 0x60, 0xf9, 0x2e, 0xf6, 0x58, 0xd5, 0x69, 0xb8, + 0x77, 0x89, 0x8a, 0xf3, 0x59, 0x16, 0x8f, 0x4f, 0x05, 0xf4, 0x60, 0xbf, 0x3c, 0xaa, 0xfe, 0xf1, + 0x38, 0xe6, 0x71, 0x11, 0xba, 0x25, 0x8f, 0x27, 0xa6, 0x22, 0x7a, 0x11, 0xfa, 0x5b, 0xdb, 0x4e, + 0x48, 0x12, 0x4f, 0x86, 0xfa, 0x2b, 0x14, 0x78, 0xb0, 0x5f, 0x1e, 0x53, 0x05, 0x18, 0x04, 0x73, + 0xea, 0xde, 0xa3, 0xe1, 0xa5, 0x27, 0x67, 0xd7, 0x68, 0x78, 0x7f, 0x6a, 0x41, 0xdf, 
0x9a, 0x5f, + 0x3f, 0x8e, 0x2d, 0xe0, 0x55, 0x63, 0x0b, 0x78, 0x24, 0x2f, 0xc5, 0x44, 0xee, 0xea, 0x5f, 0x4e, + 0xac, 0xfe, 0xb3, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0xc4, 0x15, 0xe2, 0x79, 0xd4, + 0xf3, 0xc6, 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0xa7, 0x60, 0x50, 0xbc, + 0xb7, 0x49, 0xbe, 0xf9, 0x15, 0xb4, 0x58, 0xe2, 0xed, 0x9f, 0x28, 0x82, 0x91, 0x28, 0x03, 0xfd, + 0x8a, 0x05, 0xb3, 0x01, 0xf7, 0xc3, 0xad, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, + 0xbd, 0xdd, 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0x3b, 0xa4, 0xd6, 0x66, 0x66, + 0xb7, 0x2e, 0x59, 0x39, 0x94, 0x3f, 0xfb, 0x73, 0xf7, 0xf6, 0xcb, 0xb3, 0xf8, 0x50, 0xbc, 0xf1, + 0x21, 0xdb, 0x82, 0x7e, 0xcb, 0x82, 0x8b, 0x3c, 0x7f, 0x44, 0xef, 0xed, 0xef, 0x70, 0x5b, 0xae, + 0x48, 0x56, 0x31, 0x93, 0x75, 0x12, 0x34, 0xe7, 0x3f, 0x20, 0x3a, 0xf4, 0x62, 0xe5, 0x70, 0x75, + 0xe1, 0xc3, 0x36, 0xce, 0xfe, 0xc7, 0x45, 0x18, 0x15, 0x51, 0xd3, 0xc4, 0x19, 0xf0, 0xa2, 0x31, + 0x25, 0x1e, 0x4d, 0x4c, 0x89, 0x49, 0x83, 0xf8, 0x68, 0xb6, 0xff, 0x10, 0x26, 0xe9, 0xe6, 0x7c, + 0x85, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0xee, 0xf0, 0x55, 0x3c, 0xf4, 0xee, 0xaf, 0xf4, 0x93, 0xd7, + 0x92, 0xcc, 0x70, 0x9a, 0xff, 0x77, 0xd2, 0x99, 0xe3, 0xc1, 0x44, 0x2a, 0xf0, 0xdd, 0x1b, 0x50, + 0x52, 0x8f, 0x45, 0xc4, 0xa6, 0xd3, 0x39, 0x7e, 0x64, 0x92, 0x03, 0x57, 0x7f, 0xc5, 0x0f, 0x95, + 0x62, 0x76, 0xf6, 0xdf, 0x2f, 0x18, 0x15, 0xf2, 0x41, 0x5c, 0x83, 0x21, 0x27, 0x0c, 0xdd, 0x2d, + 0x8f, 0xd4, 0x3b, 0x69, 0x28, 0x53, 0xd5, 0xb0, 0x07, 0x3b, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, + 0x15, 0xee, 0x56, 0xb7, 0x4b, 0x3a, 0xa9, 0x27, 0x53, 0xdc, 0x40, 0x3a, 0xde, 0xed, 0x12, 0x2c, + 0xca, 0xa3, 0x4f, 0x70, 0xbf, 0xc7, 0xab, 0x9e, 0x7f, 0xdb, 0xbb, 0xec, 0xfb, 0x32, 0xe8, 0x46, + 0x6f, 0x0c, 0x27, 0xa5, 0xb7, 0xa3, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x45, 0x92, 0xfd, 0x0c, 0xb0, + 0x78, 0xf9, 0xe6, 0xdb, 0xec, 0x10, 0x11, 0x18, 0x17, 0x21, 0xf9, 0x24, 0x4c, 0xf4, 0x5d, 0xe6, + 0x55, 0xce, 0x2c, 0x1d, 0x2b, 0xd2, 0xaf, 0x9a, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0xb3, 0x16, 0xb0, + 0x77, 0xaa, 0xc7, 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, + 0x81, 0xcf, 0xac, 0x4a, 0xe0, 0xdf, 0xd9, 0x13, 0xce, 0x2a, 0xdd, 0xef, 0x1f, 0xf6, 0xff, 0xb6, + 0xf8, 0x26, 0x16, 0xbf, 0xea, 0xff, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x59, 0x9d, 0x72, + 0x35, 0x7a, 0x46, 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x6b, 0xa8, 0x64, 0x68, 0xc7, 0x21, 0x09, 0xee, + 0xaa, 0x95, 0x52, 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0xaa, 0x33, 0x3e, 0xcb, 0x8f, + 0x58, 0x15, 0x8a, 0xb4, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0x72, 0xf9, 0x78, 0xb7, + 0x43, 0x94, 0x9d, 0x3e, 0xda, 0x13, 0xd8, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0xa4, 0x05, 0x0f, + 0xe9, 0x84, 0xda, 0x2b, 0x9b, 0x6e, 0x46, 0x92, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, + 0x10, 0xa7, 0xc6, 0x05, 0xd9, 0xe9, 0xd7, 0x05, 0xfc, 0x40, 0xe4, 0x28, 0x90, 0xdc, 0x25, 0x1c, + 0xab, 0x92, 0xf4, 0xf6, 0xc9, 0x3a, 0x23, 0x14, 0xef, 0xa9, 0xd8, 0x1e, 0xc0, 0x2c, 0xe9, 0x21, + 0x16, 0x18, 0xfb, 0x9b, 0x16, 0x9f, 0x58, 0x7a, 0xd3, 0xd1, 0x5b, 0x30, 0xd1, 0x74, 0xa2, 0xda, + 0xf6, 0xd2, 0x9d, 0x56, 0xc0, 0x4d, 0x4e, 0xb2, 0x9f, 0x9e, 0xee, 0xd6, 0x4f, 0xda, 0x47, 0xc6, + 0xae, 0x9c, 0xab, 0x09, 0x66, 0x38, 0xc5, 0x1e, 0x6d, 0xc0, 0x30, 0x83, 0xb1, 0xa7, 0x82, 0x61, + 0x27, 0xd1, 0x20, 0xaf, 0x36, 0xe5, 0x8c, 0xb0, 0x1a, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, + 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, 0x0c, 0xb6, 0xfc, 0xfa, 0xc2, 0xca, 0x22, 0x16, 0xa3, 0xa0, + 0x8e, 0x91, 
0x0a, 0x07, 0x63, 0x89, 0x47, 0x17, 0x60, 0x48, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, + 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, 0x0e, 0xa0, 0x15, 0xf8, 0xbb, 0x6e, 0x9d, 0x85, 0x0e, 0x29, + 0x9a, 0x7e, 0x44, 0x15, 0x85, 0xc1, 0x1a, 0x15, 0x7a, 0x05, 0x46, 0xdb, 0x5e, 0xc8, 0xc5, 0x11, + 0x2d, 0x9a, 0xb2, 0xf2, 0x70, 0xb9, 0xa1, 0x23, 0xb1, 0x49, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, + 0x5f, 0x4c, 0x7f, 0xbe, 0xbb, 0xef, 0x3a, 0xa5, 0xd0, 0x13, 0x08, 0xd1, 0x02, 0x58, 0x14, 0x44, + 0x6f, 0xc8, 0x57, 0xbb, 0x7c, 0x63, 0x17, 0x7e, 0xf6, 0xbd, 0x1d, 0x02, 0xda, 0x9b, 0x5d, 0xe1, + 0xbf, 0x6f, 0xf0, 0x42, 0x2f, 0x03, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xbc, 0xd9, 0x94, + 0x5c, 0xb0, 0xe8, 0xaf, 0xf9, 0xd1, 0x8d, 0x90, 0x2c, 0x29, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x55, + 0x02, 0x88, 0xe5, 0x76, 0x74, 0x37, 0xb5, 0x71, 0x3d, 0xd3, 0x59, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, + 0x7d, 0xbf, 0x05, 0xc3, 0x22, 0x42, 0x0a, 0x1b, 0xa1, 0x42, 0xe7, 0x8d, 0xd3, 0x0c, 0xd4, 0x42, + 0x4b, 0xf0, 0x26, 0x3c, 0x2f, 0x67, 0xa8, 0x86, 0xe9, 0xda, 0x0a, 0xbd, 0x62, 0xf4, 0x3e, 0x79, + 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x1b, 0xc6, 0x2d, + 0xb1, 0x2f, 0xff, 0x59, 0xa2, 0x21, 0xbe, 0x76, 0xbb, 0x20, 0xa2, 0x8a, 0x1e, 0xa2, 0xa0, 0x3f, + 0xff, 0x79, 0x9e, 0x76, 0x4f, 0xea, 0x12, 0x9e, 0xe0, 0xd3, 0x30, 0x5e, 0x37, 0x85, 0x00, 0x31, + 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, + 0x23, 0x56, 0xac, 0x78, 0x9b, 0xbe, 0x78, 0xeb, 0x61, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x26, + 0xa5, 0x8c, 0x4f, 0xf7, 0x35, 0x51, 0x16, 0x2b, 0x2e, 0xe8, 0x35, 0x18, 0x60, 0xef, 0xb3, 0xc2, + 0xe9, 0xa1, 0x7c, 0x8d, 0xb3, 0x19, 0xba, 0x2f, 0x5e, 0x90, 0xec, 0x6f, 0x88, 0x05, 0x07, 0x74, + 0x45, 0xbe, 0x7e, 0x0c, 0x57, 0xbc, 0x1b, 0x21, 0x61, 0xaf, 0x1f, 0x4b, 0xf3, 0x8f, 0xc7, 0x0f, + 0x1b, 0x39, 0x3c, 0x33, 0xcd, 0xa0, 0x51, 0x92, 0x4a, 0x51, 0xe2, 0xbf, 0xcc, 0x5e, 0x28, 0x02, + 0x0d, 0x65, 0x36, 0xcf, 0xcc, 0x70, 0x18, 0x77, 0xe7, 0x4d, 0x93, 0x05, 0x4e, 0xf2, 0xa4, 0x12, + 0x29, 0x5f, 0xf5, 0xe2, 0xb5, 0x48, 0xb7, 0xbd, 0x83, 0x5f, 0xc4, 0xd9, 0x69, 0xc4, 0x21, 0x58, + 0x94, 0x3f, 0x56, 0xf1, 0x60, 0xc6, 0x83, 0x89, 0xe4, 0x12, 0x7d, 0xa0, 0xe2, 0xc8, 0x1f, 0xf6, + 0xc1, 0x98, 0x39, 0xa5, 0xd0, 0x45, 0x28, 0x09, 0x26, 0x2a, 0x03, 0x88, 0x5a, 0x25, 0xab, 0x12, + 0x81, 0x63, 0x1a, 0x96, 0xf8, 0x85, 0x15, 0xd7, 0xdc, 0x83, 0xe3, 0xc4, 0x2f, 0x0a, 0x83, 0x35, + 0x2a, 0x7a, 0xb1, 0xda, 0xf0, 0xfd, 0x48, 0x1d, 0x48, 0x6a, 0xde, 0xcd, 0x33, 0x28, 0x16, 0x58, + 0x7a, 0x10, 0xed, 0x90, 0xc0, 0x23, 0x0d, 0x33, 0xf2, 0xb6, 0x3a, 0x88, 0xae, 0xea, 0x48, 0x6c, + 0xd2, 0xd2, 0xe3, 0xd4, 0x0f, 0xd9, 0x44, 0x16, 0xd7, 0xb7, 0xd8, 0xdd, 0xba, 0xca, 0x5f, 0x79, + 0x4b, 0x3c, 0xfa, 0x18, 0x3c, 0xa4, 0x02, 0x67, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0x0e, 0x18, 0xda, + 0x96, 0x87, 0x16, 0xb2, 0xc9, 0x70, 0x5e, 0x79, 0xf4, 0x2a, 0x8c, 0x09, 0x11, 0x5f, 0x72, 0x1c, + 0x34, 0x3d, 0x8c, 0xae, 0x1a, 0x58, 0x9c, 0xa0, 0x96, 0xb1, 0xc3, 0x99, 0x94, 0x2d, 0x39, 0x0c, + 0xa5, 0x63, 0x87, 0xeb, 0x78, 0x9c, 0x2a, 0x81, 0xe6, 0x60, 0x9c, 0xcb, 0x60, 0xae, 0xb7, 0xc5, + 0xc7, 0x44, 0x3c, 0xe6, 0x52, 0x4b, 0xea, 0xba, 0x89, 0xc6, 0x49, 0x7a, 0xf4, 0x12, 0x8c, 0x38, + 0x41, 0x6d, 0xdb, 0x8d, 0x48, 0x2d, 0x6a, 0x07, 0xfc, 0x95, 0x97, 0xe6, 0xa2, 0x35, 0xa7, 0xe1, + 0xb0, 0x41, 0x69, 0xdf, 0x85, 0xa9, 0x8c, 0xf0, 0x0f, 0x74, 0xe2, 0x38, 0x2d, 0x57, 0x7e, 0x53, + 0xc2, 0xc3, 0x79, 0xae, 0xb2, 0x22, 0xbf, 0x46, 0xa3, 0xa2, 0xb3, 0x93, 0x85, 0x89, 0xd0, 0x92, + 0x95, 0xaa, 0xd9, 0xb9, 0x2c, 0x11, 
0x38, 0xa6, 0xb1, 0xff, 0x5b, 0x01, 0xc6, 0x33, 0x6c, 0x2b, + 0x2c, 0x61, 0x66, 0xe2, 0x92, 0x12, 0xe7, 0xc7, 0x34, 0x43, 0xd1, 0x17, 0x0e, 0x11, 0x8a, 0xbe, + 0xd8, 0x2d, 0x14, 0x7d, 0xdf, 0xdb, 0x09, 0x45, 0x6f, 0xf6, 0x58, 0x7f, 0x4f, 0x3d, 0x96, 0x11, + 0xbe, 0x7e, 0xe0, 0x90, 0xe1, 0xeb, 0x8d, 0x4e, 0x1f, 0xec, 0xa1, 0xd3, 0x7f, 0xb4, 0x00, 0x13, + 0x49, 0x57, 0xd2, 0x63, 0xd0, 0xdb, 0xbe, 0x66, 0xe8, 0x6d, 0x2f, 0xf4, 0xf2, 0xf8, 0x36, 0x57, + 0x87, 0x8b, 0x13, 0x3a, 0xdc, 0xf7, 0xf6, 0xc4, 0xad, 0xb3, 0x3e, 0xf7, 0xa7, 0x0a, 0x70, 0x32, + 0xf3, 0xf5, 0xef, 0x31, 0xf4, 0xcd, 0x75, 0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x61, 0x72, 0x6e, 0x07, + 0xdd, 0x4a, 0x74, 0xd0, 0xc5, 0xde, 0x59, 0x76, 0xee, 0xa5, 0xaf, 0x17, 0xe1, 0x6c, 0x66, 0xb9, + 0x58, 0xed, 0xb9, 0x6c, 0xa8, 0x3d, 0x9f, 0x4b, 0xa8, 0x3d, 0xed, 0xce, 0xa5, 0x8f, 0x46, 0x0f, + 0x2a, 0x1e, 0xe8, 0xb2, 0x30, 0x03, 0xf7, 0xa9, 0x03, 0x35, 0x1e, 0xe8, 0x2a, 0x46, 0xd8, 0xe4, + 0xfb, 0x9d, 0xa4, 0xfb, 0xfc, 0x97, 0x16, 0x9c, 0xce, 0x1c, 0x9b, 0x63, 0xd0, 0x75, 0xad, 0x99, + 0xba, 0xae, 0xa7, 0x7a, 0x9e, 0xad, 0x39, 0xca, 0xaf, 0x9f, 0xe9, 0xcf, 0xf9, 0x16, 0x76, 0x93, + 0xbf, 0x0e, 0xc3, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xd5, 0xaf, 0xab, 0x40, 0xd8, 0xcf, 0xb2, 0x7b, + 0x56, 0x0c, 0x3e, 0xd8, 0x2f, 0xcf, 0x24, 0x59, 0xc4, 0x68, 0xac, 0x73, 0x40, 0x9f, 0x80, 0xa1, + 0x50, 0x9c, 0x9b, 0x62, 0xec, 0x9f, 0xef, 0xb1, 0x73, 0x9c, 0x0d, 0xd2, 0x30, 0x23, 0x2e, 0x29, + 0x4d, 0x85, 0x62, 0x69, 0x46, 0x67, 0x29, 0x1c, 0x69, 0x74, 0x96, 0xe7, 0x00, 0x76, 0xd5, 0x65, + 0x20, 0xa9, 0x7f, 0xd0, 0xae, 0x09, 0x1a, 0x15, 0xfa, 0x08, 0x4c, 0x84, 0x3c, 0x24, 0xe1, 0x42, + 0xc3, 0x09, 0xd9, 0x3b, 0x1a, 0x31, 0x0b, 0x59, 0x54, 0xa7, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68, + 0x59, 0xd6, 0xca, 0xe2, 0x27, 0xf2, 0x89, 0x79, 0x3e, 0xae, 0x51, 0xa4, 0xeb, 0x3e, 0x91, 0xec, + 0x7e, 0xd6, 0xf1, 0x5a, 0x49, 0xf4, 0x09, 0x00, 0x3a, 0x7d, 0x84, 0x1e, 0x62, 0x30, 0x7f, 0xf3, + 0xa4, 0xbb, 0x4a, 0x3d, 0xd3, 0xb9, 0x99, 0xbd, 0xa9, 0x5d, 0x54, 0x4c, 0xb0, 0xc6, 0x10, 0x39, + 0x30, 0x1a, 0xff, 0x8b, 0xb3, 0xd9, 0x5e, 0xc8, 0xad, 0x21, 0xc9, 0x9c, 0xa9, 0xbc, 0x17, 0x75, + 0x16, 0xd8, 0xe4, 0x68, 0xff, 0xf8, 0x20, 0x3c, 0xdc, 0x61, 0x1b, 0x46, 0x73, 0xa6, 0xa9, 0xf7, + 0xe9, 0xe4, 0xfd, 0x7d, 0x26, 0xb3, 0xb0, 0x71, 0xa1, 0x4f, 0xcc, 0xf6, 0xc2, 0xdb, 0x9e, 0xed, + 0x3f, 0x6c, 0x69, 0x9a, 0x15, 0xee, 0x54, 0xfa, 0xe1, 0x43, 0x1e, 0x2f, 0x47, 0xa8, 0x6a, 0xd9, + 0xcc, 0xd0, 0x57, 0x3c, 0xd7, 0x73, 0x73, 0x7a, 0x57, 0x60, 0x7c, 0x35, 0x3b, 0x0e, 0x2f, 0x57, + 0x65, 0x5c, 0x3e, 0xec, 0xf7, 0x1f, 0x57, 0x4c, 0xde, 0x8f, 0xc9, 0xe8, 0x4b, 0xbc, 0x5e, 0xb1, + 0xd6, 0x5e, 0x8c, 0xc3, 0x29, 0xa9, 0xb3, 0xf4, 0xd1, 0xcc, 0xe6, 0xea, 0x44, 0xd8, 0x60, 0x75, + 0xbc, 0x57, 0xef, 0x6f, 0x51, 0x10, 0xe0, 0xdf, 0xb1, 0xe0, 0x4c, 0xc7, 0x88, 0x30, 0xdf, 0x86, + 0xb2, 0xa1, 0xfd, 0x39, 0x0b, 0xb2, 0x07, 0xdb, 0xf0, 0x28, 0xbb, 0x08, 0xa5, 0x5a, 0x22, 0xef, + 0x66, 0x1c, 0x1b, 0x41, 0xe5, 0xdc, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, 0xb1, 0x5f, + 0xb7, 0x20, 0xb5, 0xbf, 0x1f, 0x83, 0xa0, 0xb1, 0x62, 0x0a, 0x1a, 0x8f, 0xf7, 0xd2, 0x9b, 0x39, + 0x32, 0xc6, 0x9f, 0x8c, 0xc3, 0xa9, 0x9c, 0x17, 0x79, 0xbb, 0x30, 0xb9, 0x55, 0x23, 0xe6, 0xe3, + 0xea, 0x4e, 0x41, 0x87, 0x3a, 0xbe, 0xc4, 0xe6, 0xe9, 0x4e, 0x53, 0x24, 0x38, 0x5d, 0x05, 0xfa, + 0x9c, 0x05, 0x27, 0x9c, 0xdb, 0xe1, 0x12, 0x15, 0x18, 0xdd, 0xda, 0x7c, 0xc3, 0xaf, 0xed, 0xd0, + 0xd3, 0x58, 0x2e, 0x84, 0x17, 0x32, 0x95, 0x78, 0xb7, 0xaa, 0x29, 0x7a, 0xa3, 0x7a, 0x96, 0xdc, + 0x3a, 0x8b, 0x0a, 0x67, 0xd6, 0x85, 0xb0, 0x48, 0xed, 0x41, 
0xaf, 0xa3, 0x1d, 0x9e, 0xff, 0x67, + 0x3d, 0x9d, 0xe4, 0x12, 0x90, 0xc4, 0x60, 0xc5, 0x07, 0x7d, 0x0a, 0x4a, 0x5b, 0xf2, 0xa5, 0x6f, + 0x86, 0x84, 0x15, 0x77, 0x64, 0xe7, 0xf7, 0xcf, 0xdc, 0x12, 0xaf, 0x88, 0x70, 0xcc, 0x14, 0xbd, + 0x0a, 0x45, 0x6f, 0x33, 0xec, 0x94, 0x1f, 0x3a, 0xe1, 0x72, 0xc9, 0x83, 0x6c, 0xac, 0x2d, 0x57, + 0x31, 0x2d, 0x88, 0xae, 0x40, 0x31, 0xd8, 0xa8, 0x0b, 0x0d, 0x74, 0xe6, 0x22, 0xc5, 0xf3, 0x8b, + 0x39, 0xad, 0x62, 0x9c, 0xf0, 0xfc, 0x22, 0xa6, 0x2c, 0x50, 0x05, 0xfa, 0xd9, 0x33, 0x36, 0x21, + 0xcf, 0x64, 0xde, 0xdc, 0x3a, 0x3c, 0x07, 0xe5, 0x91, 0x38, 0x18, 0x01, 0xe6, 0x8c, 0xd0, 0x3a, + 0x0c, 0xd4, 0x58, 0x2e, 0x61, 0x21, 0xc0, 0xbc, 0x2f, 0x53, 0xd7, 0xdc, 0x21, 0xc9, 0xb2, 0x50, + 0xbd, 0x32, 0x0a, 0x2c, 0x78, 0x31, 0xae, 0xa4, 0xb5, 0xbd, 0x19, 0x8a, 0x5c, 0xfb, 0xd9, 0x5c, + 0x3b, 0xe4, 0x0e, 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0x27, + 0x6a, 0x99, 0x4a, 0x67, 0x33, 0x4e, 0xca, 0xfc, 0xc0, 0xbd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, + 0x36, 0x6b, 0x68, 0x0d, 0x06, 0x37, 0x79, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, 0xe8, 0x43, + 0x2a, 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0x99, 0x26, 0x54, 0x84, 0x08, 0x11, + 0xa0, 0x6e, 0xf6, 0x70, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, 0x3a, 0xab, + 0x9d, 0xbb, 0xed, 0x80, 0x85, 0x1a, 0x17, 0x91, 0x8c, 0x32, 0x67, 0xf5, 0x9c, 0x24, 0xea, 0x34, + 0xab, 0x15, 0x11, 0x8e, 0x99, 0xa2, 0x1d, 0x18, 0xdd, 0x0d, 0x5b, 0xdb, 0x44, 0x2e, 0x69, 0x16, + 0xd8, 0x28, 0x47, 0x3e, 0xba, 0x29, 0x08, 0xdd, 0x20, 0x6a, 0x3b, 0x8d, 0xd4, 0x2e, 0xc4, 0x64, + 0xd9, 0x9b, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x56, 0xdb, 0xdf, 0xd8, 0x8b, 0x88, 0x88, + 0x2b, 0x97, 0xd9, 0xfd, 0xaf, 0x73, 0x92, 0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x4d, 0xd1, + 0x3d, 0x6c, 0xf7, 0x9c, 0xc8, 0x8f, 0x30, 0x3b, 0x27, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, + 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8, 0xf7, 0x12, 0x3b, 0xf4, 0x64, 0xfe, 0x2e, 0x59, 0xc9, + 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x54, 0x87, 0xb1, 0x96, 0x1f, 0x44, 0xb7, + 0xfd, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x2f, 0x66, 0x50, 0x8a, 0x1a, 0x59, 0xc8, 0x46, 0x13, 0x83, + 0x13, 0x3c, 0xd1, 0x47, 0x61, 0x30, 0xac, 0x39, 0x0d, 0xb2, 0x72, 0x7d, 0x7a, 0x2a, 0xff, 0xf8, + 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x65, 0xe8, 0x67, + 0xe9, 0x16, 0x59, 0x10, 0xc4, 0x9c, 0x40, 0xb9, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, 0x30, 0xe6, + 0xc5, 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x99, 0xbf, 0x06, 0xc4, 0xad, 0xf2, 0x7a, + 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xea, 0xe0, 0xb9, + 0x95, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0x07, 0x83, 0x69, 0x99, 0x85, + 0x29, 0x14, 0xfe, 0x82, 0x95, 0xb2, 0x35, 0xbf, 0xbf, 0x57, 0xfd, 0xe6, 0x11, 0x5e, 0x85, 0x3e, + 0x67, 0xc1, 0xa9, 0x56, 0xe6, 0x87, 0x08, 0x01, 0xa0, 0x37, 0x35, 0x29, 0xff, 0x74, 0x15, 0x30, + 0x33, 0x1b, 0x8f, 0x73, 0x6a, 0x4a, 0x5e, 0x37, 0x8b, 0x6f, 0xfb, 0xba, 0xb9, 0x0a, 0x43, 0x35, + 0x7e, 0x15, 0xe9, 0x98, 0x5b, 0x3f, 0x79, 0xf7, 0x66, 0xa2, 0x84, 0xb8, 0xc3, 0x6c, 0x62, 0xc5, + 0x02, 0xfd, 0x88, 0x05, 0x67, 0x92, 0x4d, 0xc7, 0x84, 0xa1, 0x45, 0x94, 0x4d, 0xae, 0xcb, 0x58, + 0x16, 0xdf, 0x9f, 0x92, 0xff, 0x0d, 0xe2, 0x83, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, + 0x99, 0x32, 0x60, 0x1a, 0x90, 0x7a, 0x50, 0xa8, 0xbc, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, + 0xd1, 0x4b, 0x38, 0x9d, 0x30, 0x67, 0x8b, 0x55, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x1a, 
0x66, 0xe8, + 0xbe, 0xd5, 0x30, 0x6f, 0xc2, 0x88, 0xa7, 0x79, 0x26, 0x0b, 0x79, 0xe0, 0x7c, 0x7e, 0x84, 0x5c, + 0xdd, 0x8f, 0x99, 0xb7, 0x52, 0x87, 0x60, 0x83, 0xdb, 0xf1, 0x7a, 0x80, 0x7d, 0xc9, 0xca, 0x10, + 0xea, 0xb9, 0x2a, 0xe6, 0x43, 0xa6, 0x2a, 0xe6, 0x7c, 0x52, 0x15, 0x93, 0x32, 0x1e, 0x18, 0x5a, + 0x98, 0xde, 0xb3, 0x3b, 0xf5, 0x1a, 0x65, 0xd3, 0x6e, 0xc0, 0xb9, 0x6e, 0xc7, 0x12, 0xf3, 0xf8, + 0xab, 0x2b, 0x53, 0x71, 0xec, 0xf1, 0x57, 0x5f, 0x59, 0xc4, 0x0c, 0xd3, 0x6b, 0xfc, 0x26, 0xfb, + 0xbf, 0x58, 0x50, 0xac, 0xf8, 0xf5, 0x63, 0xb8, 0xf0, 0x7e, 0xd8, 0xb8, 0xf0, 0x3e, 0x9c, 0x7d, + 0x20, 0xd6, 0x73, 0x4d, 0x1f, 0x4b, 0x09, 0xd3, 0xc7, 0x99, 0x3c, 0x06, 0x9d, 0x0d, 0x1d, 0x3f, + 0x5d, 0x84, 0xe1, 0x8a, 0x5f, 0x57, 0xee, 0xf6, 0xff, 0xf4, 0x7e, 0xdc, 0xed, 0x73, 0x73, 0x65, + 0x68, 0x9c, 0x99, 0xa3, 0xa0, 0x7c, 0x69, 0xfc, 0x6d, 0xe6, 0x75, 0x7f, 0x8b, 0xb8, 0x5b, 0xdb, + 0x11, 0xa9, 0x27, 0x3f, 0xe7, 0xf8, 0xbc, 0xee, 0xff, 0xa0, 0x00, 0xe3, 0x89, 0xda, 0x51, 0x03, + 0x46, 0x1b, 0xba, 0x62, 0x5d, 0xcc, 0xd3, 0xfb, 0xd2, 0xc9, 0x0b, 0xaf, 0x65, 0x0d, 0x84, 0x4d, + 0xe6, 0x68, 0x16, 0x40, 0x59, 0x9a, 0xa5, 0x7a, 0x95, 0x49, 0xfd, 0xca, 0x14, 0x1d, 0x62, 0x8d, + 0x02, 0xbd, 0x08, 0xc3, 0x91, 0xdf, 0xf2, 0x1b, 0xfe, 0xd6, 0xde, 0x55, 0x22, 0x43, 0x7b, 0x29, + 0x5f, 0xc4, 0xf5, 0x18, 0x85, 0x75, 0x3a, 0x74, 0x07, 0x26, 0x15, 0x93, 0xea, 0x11, 0x18, 0x1b, + 0x98, 0x56, 0x61, 0x2d, 0xc9, 0x11, 0xa7, 0x2b, 0xb1, 0x7f, 0xae, 0xc8, 0xbb, 0xd8, 0x8b, 0xdc, + 0x77, 0x57, 0xc3, 0x3b, 0x7b, 0x35, 0x7c, 0xdd, 0x82, 0x09, 0x5a, 0x3b, 0x73, 0xb4, 0x92, 0xc7, + 0xbc, 0x8a, 0xc9, 0x6d, 0x75, 0x88, 0xc9, 0x7d, 0x9e, 0xee, 0x9a, 0x75, 0xbf, 0x1d, 0x09, 0xdd, + 0x9d, 0xb6, 0x2d, 0x52, 0x28, 0x16, 0x58, 0x41, 0x47, 0x82, 0x40, 0x3c, 0x0e, 0xd5, 0xe9, 0x48, + 0x10, 0x60, 0x81, 0x95, 0x21, 0xbb, 0xfb, 0xb2, 0x43, 0x76, 0xf3, 0xc8, 0xab, 0xc2, 0x25, 0x47, + 0x08, 0x5c, 0x5a, 0xe4, 0x55, 0xe9, 0xab, 0x13, 0xd3, 0xd8, 0x5f, 0x2d, 0xc2, 0x48, 0xc5, 0xaf, + 0xc7, 0x56, 0xe6, 0x17, 0x0c, 0x2b, 0xf3, 0xb9, 0x84, 0x95, 0x79, 0x42, 0xa7, 0x7d, 0xd7, 0xa6, + 0xfc, 0xad, 0xb2, 0x29, 0xff, 0x9a, 0xc5, 0x46, 0x6d, 0x71, 0xad, 0xca, 0xfd, 0xf6, 0xd0, 0x25, + 0x18, 0x66, 0x1b, 0x0c, 0x7b, 0x8d, 0x2c, 0x4d, 0xaf, 0x2c, 0xdf, 0xd5, 0x5a, 0x0c, 0xc6, 0x3a, + 0x0d, 0xba, 0x00, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0x5d, 0x85, 0x9d, 0x94, 0xc3, 0xb0, + 0xc2, 0xa2, 0xd7, 0xe3, 0xa0, 0x9f, 0xc5, 0xfc, 0xd7, 0x8d, 0x7a, 0x7b, 0xf8, 0x12, 0xc9, 0x8f, + 0xf4, 0x69, 0xdf, 0x02, 0x94, 0xa6, 0xef, 0x21, 0x2c, 0x5d, 0xd9, 0x0c, 0x4b, 0x57, 0x4a, 0x85, + 0xa4, 0xfb, 0x33, 0x0b, 0xc6, 0x2a, 0x7e, 0x9d, 0x2e, 0xdd, 0xef, 0xa4, 0x75, 0xaa, 0x47, 0x3c, + 0x1e, 0xe8, 0x10, 0xf1, 0xf8, 0x31, 0xe8, 0xaf, 0xf8, 0xf5, 0x95, 0x4a, 0xa7, 0xd0, 0x02, 0xf6, + 0xdf, 0xb4, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc1, 0x2c, 0xf0, 0x21, 0xd3, 0x2c, 0xf0, 0x50, 0xce, + 0xbc, 0xc9, 0xb1, 0x04, 0xfc, 0x8d, 0x3e, 0x18, 0xa5, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd, + 0x66, 0xf5, 0xd0, 0x6d, 0x54, 0x0a, 0xf7, 0x1b, 0x0d, 0xff, 0x76, 0x72, 0x58, 0x97, 0x19, 0x14, + 0x0b, 0x2c, 0x7a, 0x06, 0x86, 0x5a, 0x01, 0xd9, 0x75, 0x7d, 0x21, 0xde, 0x6a, 0x46, 0x96, 0x8a, + 0x80, 0x63, 0x45, 0x41, 0xaf, 0x85, 0xa1, 0xeb, 0xd1, 0xa3, 0xbc, 0xe6, 0x7b, 0x75, 0xae, 0x39, + 0x2f, 0x8a, 0xb4, 0x1c, 0x1a, 0x1c, 0x1b, 0x54, 0xe8, 0x16, 0x94, 0xd8, 0x7f, 0xb6, 0xed, 0x1c, + 0x3e, 0x7b, 0xaf, 0xc8, 0x2a, 0x28, 0x18, 0xe0, 0x98, 0x17, 0x7a, 0x0e, 0x20, 0x92, 0xa1, 0xed, + 0x43, 0x11, 0x68, 0x4d, 0x5d, 0x05, 0x54, 0xd0, 0xfb, 0x10, 0x6b, 0x54, 0xe8, 0x69, 0x28, 0x45, + 0x8e, 0xdb, 
0xb8, 0xe6, 0x7a, 0x24, 0x64, 0x1a, 0xf1, 0xa2, 0x4c, 0xee, 0x27, 0x80, 0x38, 0xc6, + 0x53, 0x51, 0x8c, 0x05, 0xe1, 0xe0, 0xb9, 0xcb, 0x87, 0x18, 0x35, 0x13, 0xc5, 0xae, 0x29, 0x28, + 0xd6, 0x28, 0xd0, 0x36, 0x3c, 0xe2, 0x7a, 0x2c, 0x85, 0x05, 0xa9, 0xee, 0xb8, 0xad, 0xf5, 0x6b, + 0xd5, 0x9b, 0x24, 0x70, 0x37, 0xf7, 0xe6, 0x9d, 0xda, 0x0e, 0xf1, 0x64, 0x5e, 0x56, 0x99, 0xae, + 0xfb, 0x91, 0x95, 0x0e, 0xb4, 0xb8, 0x23, 0x27, 0xfb, 0x79, 0x36, 0xdf, 0xaf, 0x57, 0xd1, 0x7b, + 0x8d, 0xad, 0xe3, 0x94, 0xbe, 0x75, 0x1c, 0xec, 0x97, 0x07, 0xae, 0x57, 0xb5, 0x18, 0x12, 0x2f, + 0xc1, 0xc9, 0x8a, 0x5f, 0xaf, 0xf8, 0x41, 0xb4, 0xec, 0x07, 0xb7, 0x9d, 0xa0, 0x2e, 0xa7, 0x57, + 0x59, 0x46, 0xd1, 0xa0, 0xfb, 0x67, 0x3f, 0xdf, 0x5d, 0x8c, 0x08, 0x19, 0xcf, 0x33, 0x89, 0xed, + 0x90, 0x6f, 0xbf, 0x6a, 0x4c, 0x76, 0x50, 0x49, 0x60, 0x2e, 0x3b, 0x11, 0x41, 0xd7, 0x59, 0xe6, + 0xf5, 0xf8, 0x18, 0x15, 0xc5, 0x9f, 0xd2, 0x32, 0xaf, 0xc7, 0xc8, 0xcc, 0x73, 0xd7, 0x2c, 0x6f, + 0x7f, 0x56, 0x54, 0xc2, 0xef, 0xe0, 0xdc, 0xbf, 0xae, 0x97, 0xd4, 0xc5, 0x32, 0x4b, 0x44, 0x21, + 0x3f, 0xbd, 0x00, 0xb7, 0x7a, 0x76, 0xcc, 0x12, 0x61, 0xbf, 0x08, 0x93, 0xf4, 0xea, 0xa7, 0xe4, + 0x28, 0xf6, 0x91, 0xdd, 0xa3, 0x79, 0xfc, 0xd7, 0x7e, 0x76, 0x0e, 0x24, 0xd2, 0x9f, 0xa0, 0x4f, + 0xc2, 0x58, 0x48, 0xae, 0xb9, 0x5e, 0xfb, 0x8e, 0x54, 0xbc, 0x74, 0x78, 0x73, 0x58, 0x5d, 0xd2, + 0x29, 0xb9, 0xfa, 0xd6, 0x84, 0xe1, 0x04, 0x37, 0xd4, 0x84, 0xb1, 0xdb, 0xae, 0x57, 0xf7, 0x6f, + 0x87, 0x92, 0xff, 0x50, 0xbe, 0x16, 0xf7, 0x16, 0xa7, 0x4c, 0xb4, 0xd1, 0xa8, 0xee, 0x96, 0xc1, + 0x0c, 0x27, 0x98, 0xd3, 0xb5, 0x16, 0xb4, 0xbd, 0xb9, 0xf0, 0x46, 0x48, 0x02, 0x91, 0xf9, 0x9f, + 0xa7, 0xe5, 0x95, 0x40, 0x1c, 0xe3, 0xe9, 0x5a, 0x63, 0x7f, 0x2e, 0x07, 0x7e, 0x9b, 0xe7, 0xda, + 0x10, 0x6b, 0x0d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x5e, 0xc4, 0xfe, 0xad, 0xf9, 0x1e, 0xf6, 0xfd, + 0x48, 0xee, 0x5e, 0xcc, 0x13, 0x41, 0x83, 0x63, 0x83, 0x0a, 0x2d, 0x03, 0x0a, 0xdb, 0xad, 0x56, + 0x83, 0x39, 0x33, 0x39, 0x0d, 0xc6, 0x8a, 0x7b, 0x79, 0x14, 0x79, 0xac, 0xe0, 0x6a, 0x0a, 0x8b, + 0x33, 0x4a, 0xd0, 0x63, 0x69, 0x53, 0x34, 0xb5, 0x9f, 0x35, 0x95, 0x5b, 0x7c, 0xaa, 0xbc, 0x9d, + 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x91, 0x08, 0xed, 0x98, 0x93, 0x46, 0xab, 0xca, + 0x48, 0xb4, 0x2c, 0x8e, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, 0x04, 0xc7, 0x85, 0x6d, 0xc7, + 0x53, 0xf9, 0x82, 0xb8, 0x4f, 0xf7, 0xa5, 0x7b, 0xfb, 0xe5, 0x29, 0x51, 0xb3, 0x8e, 0x3e, 0xd8, + 0x2f, 0x9f, 0xaa, 0xf8, 0xf5, 0x0c, 0x0c, 0xce, 0xe2, 0xc6, 0x27, 0x5f, 0xad, 0xe6, 0x37, 0x5b, + 0x95, 0xc0, 0xdf, 0x74, 0x1b, 0xa4, 0x93, 0xd5, 0xac, 0x6a, 0x50, 0x8a, 0xc9, 0x67, 0xc0, 0x70, + 0x82, 0x9b, 0xfd, 0x59, 0x26, 0xba, 0xb1, 0x64, 0xf1, 0x51, 0x3b, 0x20, 0xa8, 0x09, 0xa3, 0x2d, + 0xb6, 0xb8, 0x45, 0x06, 0x0c, 0x31, 0xd7, 0x5f, 0xe8, 0x51, 0xfb, 0x73, 0x9b, 0xe5, 0xf5, 0x32, + 0x3c, 0xa3, 0x2a, 0x3a, 0x3b, 0x6c, 0x72, 0xb7, 0xff, 0xf5, 0x69, 0x76, 0xf8, 0x57, 0xb9, 0x4a, + 0x67, 0x50, 0x3c, 0x21, 0x11, 0xb7, 0xc8, 0x99, 0x7c, 0xdd, 0x62, 0x3c, 0x2c, 0xe2, 0x19, 0x0a, + 0x96, 0x65, 0xd1, 0x27, 0x60, 0x8c, 0x5e, 0xca, 0xd4, 0x01, 0x1c, 0x4e, 0x9f, 0xc8, 0x0f, 0xf5, + 0xa1, 0xa8, 0xf4, 0xec, 0x38, 0x7a, 0x61, 0x9c, 0x60, 0x86, 0x5e, 0x67, 0x9e, 0x48, 0x92, 0x75, + 0xa1, 0x17, 0xd6, 0xba, 0xd3, 0x91, 0x64, 0xab, 0x31, 0x41, 0x6d, 0x98, 0x4a, 0x27, 0xec, 0x0b, + 0xa7, 0xed, 0x7c, 0xe9, 0x36, 0x9d, 0x73, 0x2f, 0x4e, 0x63, 0x92, 0xc6, 0x85, 0x38, 0x8b, 0x3f, + 0xba, 0x06, 0xa3, 0x22, 0x63, 0xba, 0x98, 0xb9, 0x45, 0x43, 0xe5, 0x39, 0x8a, 0x75, 0xe4, 0x41, + 0x12, 0x80, 0xcd, 0xc2, 0x68, 0x0b, 
0xce, 0x68, 0x49, 0xae, 0x2e, 0x07, 0x0e, 0xf3, 0x5b, 0x70, + 0xd9, 0x76, 0xaa, 0x89, 0x25, 0x8f, 0xde, 0xdb, 0x2f, 0x9f, 0x59, 0xef, 0x44, 0x88, 0x3b, 0xf3, + 0x41, 0xd7, 0xe1, 0x24, 0x7f, 0xa8, 0xbe, 0x48, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xdc, 0xc3, 0x97, + 0xfc, 0xe9, 0x7b, 0xfb, 0xe5, 0x93, 0x73, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x10, 0x94, 0xea, + 0x5e, 0x28, 0xfa, 0x60, 0xc0, 0xc8, 0x23, 0x56, 0x5a, 0x5c, 0xab, 0xaa, 0xef, 0x8f, 0xff, 0xe0, + 0xb8, 0x00, 0xda, 0xe2, 0x6a, 0x71, 0xa5, 0xac, 0x19, 0x4c, 0x05, 0xea, 0x4a, 0xea, 0x33, 0x8d, + 0xa7, 0xaa, 0xdc, 0x1e, 0xa4, 0x5e, 0x70, 0x18, 0xaf, 0x58, 0x0d, 0xc6, 0xe8, 0x35, 0x40, 0x22, + 0x5e, 0xfd, 0x5c, 0x8d, 0xa5, 0x57, 0x61, 0x56, 0x84, 0x21, 0xf3, 0xf1, 0x64, 0x35, 0x45, 0x81, + 0x33, 0x4a, 0xa1, 0x2b, 0x74, 0x57, 0xd1, 0xa1, 0x62, 0xd7, 0x52, 0xa9, 0x25, 0x17, 0x49, 0x2b, + 0x20, 0xcc, 0x0f, 0xcb, 0xe4, 0x88, 0x13, 0xe5, 0x50, 0x1d, 0x1e, 0x71, 0xda, 0x91, 0xcf, 0x2c, + 0x0e, 0x26, 0xe9, 0xba, 0xbf, 0x43, 0x3c, 0x66, 0xec, 0x1b, 0x9a, 0x3f, 0x47, 0x05, 0xab, 0xb9, + 0x0e, 0x74, 0xb8, 0x23, 0x17, 0x2a, 0x10, 0xab, 0x5c, 0xd2, 0x60, 0x86, 0x1f, 0xcb, 0xc8, 0x27, + 0xfd, 0x22, 0x0c, 0x6f, 0xfb, 0x61, 0xb4, 0x46, 0xa2, 0xdb, 0x7e, 0xb0, 0x23, 0xc2, 0xe8, 0xc6, + 0x41, 0xc9, 0x63, 0x14, 0xd6, 0xe9, 0xe8, 0x8d, 0x97, 0xb9, 0xa2, 0xac, 0x2c, 0x32, 0x2f, 0x80, + 0xa1, 0x78, 0x8f, 0xb9, 0xc2, 0xc1, 0x58, 0xe2, 0x25, 0xe9, 0x4a, 0x65, 0x81, 0x59, 0xf4, 0x13, + 0xa4, 0x2b, 0x95, 0x05, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e, 0x40, 0x2a, 0x81, 0x5f, 0x23, + 0xa1, 0x16, 0x0a, 0xff, 0x61, 0x1e, 0x24, 0x98, 0x4e, 0xd7, 0x6a, 0x16, 0x01, 0xce, 0x2e, 0x87, + 0x48, 0x3a, 0xc1, 0xdb, 0x58, 0xbe, 0x29, 0x26, 0x2d, 0xcf, 0xf4, 0x98, 0xe3, 0xcd, 0x83, 0x09, + 0x95, 0x5a, 0x8e, 0x87, 0x05, 0x0e, 0xa7, 0xc7, 0xd9, 0xdc, 0xee, 0x3d, 0xa6, 0xb0, 0x32, 0x6e, + 0xad, 0x24, 0x38, 0xe1, 0x14, 0x6f, 0x23, 0xc2, 0xdc, 0x44, 0xd7, 0x08, 0x73, 0x17, 0xa1, 0x14, + 0xb6, 0x37, 0xea, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, 0xc0, 0x31, + 0x0d, 0x5a, 0x86, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, 0x61, 0x36, + 0xa4, 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0x52, 0xa7, 0x4e, 0x99, 0xaf, + 0xe1, 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x06, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, 0xa2, 0x62, + 0xde, 0xa9, 0xfc, 0xe8, 0x78, 0xeb, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0xd6, + 0xf9, 0x7c, 0x67, 0x81, 0xef, 0x49, 0x28, 0x72, 0x6f, 0x9e, 0xc9, 0x73, 0xc7, 0x62, 0x64, 0xe6, + 0x72, 0x10, 0x25, 0xb1, 0xce, 0x06, 0x5d, 0x86, 0xc9, 0x56, 0xe0, 0xfa, 0x6c, 0x4e, 0x28, 0xa3, + 0xe5, 0xb4, 0x99, 0xe6, 0xaa, 0x92, 0x24, 0xc0, 0xe9, 0x32, 0xec, 0x9d, 0xbc, 0x00, 0x4e, 0x9f, + 0xe6, 0xa9, 0x3a, 0xf8, 0x4d, 0x96, 0xc3, 0xb0, 0xc2, 0xa2, 0x55, 0xb6, 0x13, 0x73, 0x25, 0xcc, + 0xf4, 0x4c, 0x7e, 0x18, 0x23, 0x5d, 0x59, 0xc3, 0x85, 0x57, 0xf5, 0x17, 0xc7, 0x1c, 0x50, 0x5d, + 0xcb, 0x90, 0x49, 0xaf, 0x00, 0xe1, 0xf4, 0x23, 0x1d, 0xfc, 0x01, 0x13, 0x97, 0xa2, 0x58, 0x20, + 0x30, 0xc0, 0x21, 0x4e, 0xf0, 0x44, 0x1f, 0x81, 0x09, 0x11, 0x7c, 0x31, 0xee, 0xa6, 0x33, 0xb1, + 0xa3, 0x3c, 0x4e, 0xe0, 0x70, 0x8a, 0x9a, 0xa7, 0xca, 0x70, 0x36, 0x1a, 0x44, 0x6c, 0x7d, 0xd7, + 0x5c, 0x6f, 0x27, 0x9c, 0x3e, 0xcb, 0xf6, 0x07, 0x91, 0x2a, 0x23, 0x89, 0xc5, 0x19, 0x25, 0xd0, + 0x3a, 0x4c, 0xb4, 0x02, 0x42, 0x9a, 0x4c, 0xd0, 0x17, 0xe7, 0x59, 0x99, 0x87, 0x89, 0xa0, 0x2d, + 0xa9, 0x24, 0x70, 0x07, 0x19, 0x30, 0x9c, 0xe2, 0x80, 0x6e, 0xc3, 0x90, 0xbf, 0x4b, 0x82, 0x6d, + 0xe2, 0xd4, 0xa7, 0xcf, 0x75, 0x78, 0xb8, 0x21, 0x0e, 0xb7, 
0xeb, 0x82, 0x36, 0xe1, 0xe8, 0x20, + 0xc1, 0xdd, 0x1d, 0x1d, 0x64, 0x65, 0xe8, 0x2f, 0x5a, 0x70, 0x5a, 0xda, 0x46, 0xaa, 0x2d, 0xda, + 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x6c, 0xf0, 0x68, 0xfe, 0x63, 0xff, 0xf5, 0x9c, 0x42, + 0x4a, 0x0f, 0x7c, 0x3a, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xa2, 0x05, 0x98, 0x0c, 0x49, 0x24, 0x37, + 0xa3, 0xb9, 0x70, 0xf9, 0xf5, 0xc5, 0xb5, 0xe9, 0xc7, 0x78, 0x54, 0x06, 0xba, 0x18, 0xaa, 0x49, + 0x24, 0x4e, 0xd3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xe9, 0xc7, 0x3b, 0x24, 0x55, 0xf5, 0xeb, 0xd7, + 0xab, 0xdc, 0xe1, 0xed, 0x7a, 0x15, 0x17, 0xfc, 0x50, 0xa6, 0xab, 0xa0, 0xf7, 0xb1, 0x70, 0xfa, + 0x09, 0xae, 0x35, 0x94, 0xe9, 0x2a, 0x18, 0x10, 0xc7, 0x78, 0xb4, 0x0d, 0xe3, 0xa1, 0x71, 0xef, + 0x0d, 0xa7, 0xcf, 0xb3, 0x9e, 0x7a, 0x22, 0x6f, 0xd0, 0x0c, 0x6a, 0x2d, 0xda, 0xbc, 0xc9, 0x05, + 0x27, 0xd9, 0xf2, 0xd5, 0xa5, 0x5d, 0xf0, 0xc3, 0xe9, 0x27, 0xbb, 0xac, 0x2e, 0x8d, 0x58, 0x5f, + 0x5d, 0x3a, 0x0f, 0x9c, 0xe0, 0x39, 0xf3, 0x5d, 0x30, 0x99, 0x12, 0x97, 0x0e, 0x93, 0x89, 0x69, + 0x66, 0x07, 0x46, 0x8d, 0x29, 0xf9, 0x40, 0x1d, 0x0b, 0xbe, 0x67, 0x08, 0x4a, 0xca, 0xe8, 0x8c, + 0x2e, 0x9a, 0xbe, 0x04, 0xa7, 0x93, 0xbe, 0x04, 0x43, 0x15, 0xbf, 0x6e, 0xb8, 0x0f, 0xac, 0x67, + 0xc4, 0xee, 0xcb, 0xdb, 0x00, 0x7b, 0x7f, 0xd3, 0xa0, 0x69, 0xf2, 0x8b, 0x3d, 0x3b, 0x25, 0xf4, + 0x75, 0x34, 0x0e, 0x5c, 0x86, 0x49, 0xcf, 0x67, 0x32, 0x3a, 0xa9, 0x4b, 0x01, 0x8c, 0xc9, 0x59, + 0x25, 0x3d, 0x18, 0x4e, 0x82, 0x00, 0xa7, 0xcb, 0xd0, 0x0a, 0xb9, 0xa0, 0x94, 0xb4, 0x46, 0x70, + 0x39, 0x0a, 0x0b, 0x2c, 0x7a, 0x0c, 0xfa, 0x5b, 0x7e, 0x7d, 0xa5, 0x22, 0xe4, 0x73, 0x2d, 0x62, + 0x6c, 0x7d, 0xa5, 0x82, 0x39, 0x0e, 0xcd, 0xc1, 0x00, 0xfb, 0x11, 0x4e, 0x8f, 0xe4, 0x47, 0x3d, + 0x61, 0x25, 0xb4, 0x3c, 0x57, 0xac, 0x00, 0x16, 0x05, 0x99, 0x56, 0x94, 0x5e, 0x6a, 0x98, 0x56, + 0x74, 0xf0, 0x3e, 0xb5, 0xa2, 0x92, 0x01, 0x8e, 0x79, 0xa1, 0x3b, 0x70, 0xd2, 0xb8, 0x48, 0xf2, + 0x29, 0x42, 0x42, 0x11, 0x79, 0xe1, 0xb1, 0x8e, 0x37, 0x48, 0xe1, 0xc4, 0x70, 0x46, 0x34, 0xfa, + 0xe4, 0x4a, 0x16, 0x27, 0x9c, 0x5d, 0x01, 0x6a, 0xc0, 0x64, 0x2d, 0x55, 0xeb, 0x50, 0xef, 0xb5, + 0xaa, 0x01, 0x4d, 0xd7, 0x98, 0x66, 0x8c, 0x5e, 0x81, 0xa1, 0xb7, 0xfc, 0x90, 0x9d, 0x6d, 0xe2, + 0x4e, 0x21, 0x9f, 0xed, 0x0f, 0xbd, 0x7e, 0xbd, 0xca, 0xe0, 0x07, 0xfb, 0xe5, 0xe1, 0x8a, 0x5f, + 0x97, 0x7f, 0xb1, 0x2a, 0x80, 0x7e, 0xc0, 0x82, 0x99, 0xf4, 0x4d, 0x55, 0x35, 0x7a, 0xb4, 0xf7, + 0x46, 0xdb, 0xa2, 0xd2, 0x99, 0xa5, 0x5c, 0x76, 0xb8, 0x43, 0x55, 0xe8, 0x83, 0x74, 0x21, 0x84, + 0xee, 0x5d, 0x22, 0x92, 0x84, 0x3e, 0x1a, 0x2f, 0x04, 0x0a, 0x3d, 0xd8, 0x2f, 0x8f, 0xf3, 0x2d, + 0x2d, 0x7e, 0x37, 0x23, 0x0a, 0xd8, 0xbf, 0x6c, 0x31, 0xb5, 0xac, 0x80, 0x92, 0xb0, 0xdd, 0x38, + 0x8e, 0xcc, 0xc0, 0x4b, 0x86, 0xc9, 0xf3, 0xbe, 0xfd, 0x61, 0xfe, 0x89, 0xc5, 0xfc, 0x61, 0x8e, + 0xf1, 0xe1, 0xcb, 0xeb, 0x30, 0x14, 0xc9, 0x8c, 0xcd, 0x1d, 0x92, 0x19, 0x6b, 0x8d, 0x62, 0x3e, + 0x41, 0xea, 0x72, 0xa0, 0x92, 0x33, 0x2b, 0x36, 0xf6, 0x3f, 0xe4, 0x23, 0x20, 0x31, 0xc7, 0x60, + 0x59, 0x5a, 0x34, 0x2d, 0x4b, 0xe5, 0x2e, 0x5f, 0x90, 0x63, 0x61, 0xfa, 0x07, 0x66, 0xbb, 0x99, + 0x52, 0xec, 0x9d, 0xee, 0x88, 0x65, 0x7f, 0xde, 0x02, 0x88, 0x63, 0x79, 0xf7, 0x90, 0x93, 0xef, + 0x25, 0x7a, 0x1d, 0xf0, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0xdd, 0xf4, 0x91, 0xd8, 0xb8, 0xc5, 0xe1, + 0x07, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x0e, 0x2c, 0xc6, 0xe6, 0x56, 0x23, 0x6a, 0xe0, + 0x17, 0x2d, 0x38, 0x91, 0xe5, 0x45, 0x4d, 0x2f, 0x97, 0x5c, 0x3d, 0xa8, 0x9c, 0xe4, 0xd4, 0x68, + 0xde, 0x14, 0x70, 0xac, 0x28, 0x7a, 0x4e, 0x76, 0x78, 0xb8, 0x20, 0xda, 0xd7, 0x61, 
0xb4, 0x12, + 0x10, 0xed, 0x5c, 0x7e, 0x95, 0x47, 0xa3, 0xe0, 0xed, 0x79, 0xe6, 0xd0, 0x91, 0x28, 0xec, 0x2f, + 0x17, 0xe0, 0x04, 0xf7, 0x35, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, 0xc5, 0xaf, 0x8b, 0xb7, 0x72, 0x6f, + 0xc0, 0x48, 0x4b, 0xd3, 0xe9, 0x76, 0x0a, 0x08, 0xab, 0xeb, 0x7e, 0x63, 0x2d, 0x94, 0x0e, 0xc5, + 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xc3, 0x42, 0xe1, 0xd0, 0x67, 0xa4, 0xaa, + 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x0f, 0x20, 0x05, 0xb9, 0xfd, 0x63, 0x16, 0x3c, 0x94, 0x13, + 0x3e, 0x96, 0x56, 0x77, 0x9b, 0x79, 0xf5, 0x88, 0x69, 0xab, 0xaa, 0xe3, 0xbe, 0x3e, 0x58, 0x60, + 0xd1, 0x47, 0x01, 0xb8, 0xaf, 0x0e, 0xf1, 0x6a, 0x5d, 0xe3, 0x6c, 0x1a, 0x21, 0x02, 0xb5, 0x68, + 0x6f, 0xb2, 0x3c, 0xd6, 0x78, 0xd9, 0x5f, 0xec, 0x83, 0x7e, 0xe6, 0x1b, 0x82, 0x2a, 0x30, 0xb8, + 0xcd, 0x13, 0x02, 0x75, 0x1c, 0x37, 0x4a, 0x2b, 0x73, 0x0c, 0xc5, 0xe3, 0xa6, 0x41, 0xb1, 0x64, + 0x83, 0x56, 0x61, 0x8a, 0xe7, 0x65, 0x6a, 0x2c, 0x92, 0x86, 0xb3, 0x27, 0xd5, 0xa5, 0x3c, 0x89, + 0xb0, 0x52, 0x1b, 0xaf, 0xa4, 0x49, 0x70, 0x56, 0x39, 0xf4, 0x2a, 0x8c, 0xd1, 0xeb, 0xab, 0xdf, + 0x8e, 0x24, 0x27, 0x9e, 0x91, 0x49, 0x49, 0xf4, 0xeb, 0x06, 0x16, 0x27, 0xa8, 0xd1, 0x2b, 0x30, + 0xda, 0x4a, 0x29, 0x86, 0xfb, 0x63, 0x0d, 0x8a, 0xa9, 0x0c, 0x36, 0x69, 0x99, 0x23, 0x75, 0x9b, + 0xb9, 0x8d, 0xaf, 0x6f, 0x07, 0x24, 0xdc, 0xf6, 0x1b, 0x75, 0x26, 0x39, 0xf6, 0x6b, 0x8e, 0xd4, + 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, + 0xb2, 0x9c, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x1a, 0xef, 0xc1, 0xa3, 0xd1, 0x78, 0xdb, 0x3f, 0x53, + 0x00, 0x63, 0x68, 0xbf, 0x73, 0x33, 0x45, 0xd1, 0x2f, 0xdb, 0x0a, 0x5a, 0x35, 0xe1, 0x07, 0x95, + 0xf9, 0x65, 0x71, 0x02, 0x58, 0xfe, 0x65, 0xf4, 0x3f, 0x66, 0xa5, 0xe8, 0x1a, 0x3f, 0x59, 0x09, + 0x7c, 0x7a, 0xc8, 0xc9, 0x78, 0x65, 0xea, 0xbd, 0xc2, 0xa0, 0x7c, 0xcb, 0xdd, 0x21, 0xb2, 0xa7, + 0xf0, 0xe8, 0xe6, 0x1c, 0x0c, 0x97, 0xa1, 0xaa, 0x08, 0xaa, 0x20, 0xb9, 0xa0, 0x4b, 0x30, 0x2c, + 0xd2, 0xff, 0x30, 0xb7, 0x7a, 0xbe, 0x98, 0x98, 0x8b, 0xd3, 0x62, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, + 0x83, 0x05, 0x98, 0xca, 0x78, 0x17, 0xc5, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0x8e, 0x59, 0xed, + 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0xde, 0x1d, 0x08, + 0xec, 0x21, 0xb3, 0xb5, 0x9e, 0x83, 0xbe, 0x76, 0x48, 0x64, 0x4c, 0x5e, 0x75, 0x6c, 0x33, 0x73, + 0x30, 0xc3, 0xd0, 0x1b, 0xd8, 0x96, 0xb2, 0xac, 0x6a, 0x37, 0x30, 0x6e, 0x5b, 0xe5, 0x38, 0xda, + 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x7b, 0x5a, 0x1c, 0x5c, 0x92, 0x41, 0xb1, 0xc0, 0xda, 0x5f, + 0x28, 0xc2, 0xe9, 0xdc, 0x97, 0x92, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0xf9, 0x8e, 0xf1, + 0x80, 0x92, 0xa4, 0xb5, 0xbd, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0xf3, 0xd0, 0xcf, 0x94, 0xc9, 0xa9, + 0x6c, 0xbb, 0xf3, 0x8b, 0x3c, 0xc2, 0x18, 0x47, 0xf7, 0x9c, 0x20, 0xfd, 0x31, 0x2a, 0xc1, 0xf8, + 0x8d, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x0d, 0xcc, 0x90, 0xe8, 0x09, 0xd1, 0x5f, 0x09, 0x67, + 0x29, 0xec, 0xd4, 0xfd, 0x50, 0xeb, 0xb4, 0xa7, 0x60, 0x70, 0x87, 0xec, 0x05, 0xae, 0xb7, 0x95, + 0x74, 0xa2, 0xbb, 0xca, 0xc1, 0x58, 0xe2, 0xcd, 0xf4, 0x90, 0x83, 0x47, 0x9d, 0xd9, 0x7c, 0xa8, + 0xab, 0x78, 0xf2, 0xc3, 0x45, 0x18, 0xc7, 0xf3, 0x8b, 0xef, 0x0e, 0xc4, 0x8d, 0xf4, 0x40, 0x1c, + 0x75, 0x66, 0xf3, 0xee, 0xa3, 0xf1, 0x0b, 0x16, 0x8c, 0xb3, 0x24, 0x44, 0x22, 0x1e, 0x82, 0xeb, + 0x7b, 0xc7, 0x70, 0x15, 0x78, 0x0c, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0xcd, 0x2e, 0x6b, 0x09, 0xe6, + 0x38, 0xf4, 0x08, 0xf4, 0xb1, 0x26, 0xd0, 0xc1, 0x1b, 0xe1, 0x5b, 0xf0, 0xa2, 0x13, 0x39, 0x98, + 0x41, 0x59, 
0x7c, 0x2d, 0x4c, 0x5a, 0x0d, 0x97, 0x37, 0x3a, 0x36, 0xf5, 0xbf, 0x33, 0x62, 0x28, + 0x64, 0x36, 0xed, 0xed, 0xc5, 0xd7, 0xca, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0xc7, 0x05, 0x38, 0x9b, + 0x59, 0xae, 0xe7, 0xf8, 0x5a, 0x9d, 0x4b, 0x3f, 0xc8, 0x34, 0x33, 0xc5, 0x63, 0x74, 0x51, 0xee, + 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0xec, 0x55, 0x66, 0x97, 0xbd, 0x43, 0xc2, 0x5e, 0x65, 0xb6, + 0x2d, 0x47, 0x4d, 0xf0, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0x0b, 0x74, 0x9f, 0x61, 0xc8, + 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, 0x8f, 0x6e, + 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x00, 0xab, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, 0x85, 0xc4, + 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x56, 0xdd, 0xac, 0xe9, 0x06, 0xa1, 0x7a, 0x31, 0x23, 0x3c, 0xd6, + 0xaa, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, 0xa3, 0xf7, + 0x6d, 0x53, 0xb0, 0xbf, 0x5e, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, 0xa0, 0xed, + 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0x89, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x5e, 0xee, 0x90, 0xba, 0xa4, + 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x9c, 0x58, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, 0x8b, 0x9e, + 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x65, 0x98, 0x74, 0x76, + 0x1d, 0x97, 0xc7, 0x15, 0x97, 0x0c, 0xf8, 0x1d, 0x4b, 0xa9, 0x82, 0xe7, 0x92, 0x04, 0x38, 0x5d, + 0x06, 0xbd, 0x06, 0xc8, 0xdf, 0x60, 0xfe, 0xfd, 0xf5, 0xcb, 0xc4, 0x13, 0xd6, 0x6a, 0x36, 0x76, + 0xc5, 0x78, 0x4b, 0xb8, 0x9e, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xf1, 0x9f, 0x06, 0xf2, 0xe3, 0x3f, + 0x75, 0xde, 0x17, 0xbb, 0x66, 0x38, 0xba, 0x04, 0xa3, 0x87, 0xf4, 0x5a, 0xb5, 0xff, 0x83, 0x45, + 0x4f, 0x3c, 0x5e, 0xc6, 0x0c, 0xae, 0xfa, 0x0a, 0x73, 0xab, 0xe5, 0x9a, 0x65, 0x2d, 0xc0, 0xce, + 0x49, 0xcd, 0xad, 0x36, 0x46, 0x62, 0x93, 0x96, 0xcf, 0x21, 0xcd, 0x1d, 0xd6, 0xb8, 0x15, 0x88, + 0x08, 0x70, 0x8a, 0x02, 0x7d, 0x0c, 0x06, 0xeb, 0xee, 0xae, 0x1b, 0x0a, 0xe5, 0xd8, 0xa1, 0x8d, + 0x58, 0xf1, 0xd6, 0xb9, 0xc8, 0xd9, 0x60, 0xc9, 0xcf, 0xfe, 0xe1, 0x42, 0xdc, 0x27, 0xaf, 0xb7, + 0xfd, 0xc8, 0x39, 0x86, 0x93, 0xfc, 0xb2, 0x71, 0x92, 0x3f, 0xd1, 0x29, 0x0c, 0x1e, 0x6b, 0x52, + 0xee, 0x09, 0x7e, 0x3d, 0x71, 0x82, 0x3f, 0xd9, 0x9d, 0x55, 0xe7, 0x93, 0xfb, 0x1f, 0x59, 0x30, + 0x69, 0xd0, 0x1f, 0xc3, 0x01, 0xb2, 0x6c, 0x1e, 0x20, 0x8f, 0x76, 0xfd, 0x86, 0x9c, 0x83, 0xe3, + 0xfb, 0x8a, 0x89, 0xb6, 0xb3, 0x03, 0xe3, 0x2d, 0xe8, 0xdb, 0x76, 0x82, 0x7a, 0xa7, 0xb4, 0x1f, + 0xa9, 0x42, 0xb3, 0x57, 0x9c, 0x40, 0x58, 0xf8, 0x9f, 0x91, 0xbd, 0x4e, 0x41, 0x5d, 0xad, 0xfb, + 0xac, 0x2a, 0xf4, 0x12, 0x0c, 0x84, 0x35, 0xbf, 0xa5, 0x9e, 0xfa, 0x9c, 0x63, 0x1d, 0xcd, 0x20, + 0x07, 0xfb, 0x65, 0x64, 0x56, 0x47, 0xc1, 0x58, 0xd0, 0xa3, 0x37, 0x60, 0x94, 0xfd, 0x52, 0xee, + 0x76, 0xc5, 0x7c, 0x0d, 0x46, 0x55, 0x27, 0xe4, 0xbe, 0xa8, 0x06, 0x08, 0x9b, 0xac, 0x66, 0xb6, + 0xa0, 0xa4, 0x3e, 0xeb, 0x81, 0x5a, 0x89, 0xff, 0x6d, 0x11, 0xa6, 0x32, 0xe6, 0x1c, 0x0a, 0x8d, + 0x91, 0xb8, 0xd4, 0xe3, 0x54, 0x7d, 0x9b, 0x63, 0x11, 0xb2, 0x0b, 0x54, 0x5d, 0xcc, 0xad, 0x9e, + 0x2b, 0xbd, 0x11, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9, 0xb1, 0x75, 0x35, 0xad, + 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0xaf, 0xf5, 0xc1, 0x89, 0xac, 0xc8, 0x9c, 0xe8, 0x33, 0x89, + 0xa4, 0xb3, 0x2f, 0xf4, 0x1a, 0xd3, 0x93, 0x67, 0xa2, 0x15, 0x11, 0x03, 0x67, 0xcd, 0x34, 0xb4, + 0x5d, 0xbb, 0x59, 0xd4, 0xc9, 0x62, 0x96, 0x04, 0x3c, 0x59, 0xb0, 0xdc, 0x3e, 0xde, 0xdf, 0x73, + 0x03, 0x44, 0x96, 0xe1, 0x30, 0xe1, 0xca, 0x23, 0xc1, 0xdd, 0x5d, 0x79, 0x64, 0xcd, 0x68, 0x05, + 0x06, 0x6a, 0xdc, 0x47, 0xa4, 0xd8, 
0x7d, 0x0b, 0xe3, 0x0e, 0x22, 0x6a, 0x03, 0x16, 0x8e, 0x21, + 0x82, 0xc1, 0x8c, 0x0b, 0xc3, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, 0x43, 0x0f, 0x3e, 0xad, 0x0b, + 0x1e, 0xe8, 0x04, 0xfa, 0x31, 0x0b, 0x12, 0x0f, 0x45, 0x94, 0x52, 0xce, 0xca, 0x55, 0xca, 0x9d, + 0x83, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xa2, 0x57, 0xec, 0x37, 0x08, 0x66, 0x18, 0x4a, 0x11, 0xc5, + 0xaa, 0x96, 0x11, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x41, 0x7f, 0x83, 0xec, 0x92, 0x46, 0x32, + 0x1f, 0xd7, 0x35, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x42, 0x1f, 0x9c, 0xe9, 0x18, 0x40, 0x88, 0x5e, + 0xc6, 0xb6, 0x9c, 0x88, 0xdc, 0x76, 0xf6, 0x92, 0x89, 0x73, 0x2e, 0x73, 0x30, 0x96, 0x78, 0xf6, + 0x6a, 0x91, 0xc7, 0xbf, 0x4f, 0xa8, 0x30, 0x45, 0xd8, 0x7b, 0x81, 0x35, 0x55, 0x62, 0xc5, 0xa3, + 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x4e, 0x57, 0x17, 0xcf, 0x21, 0xe3, 0x3c, 0x09, + 0xd5, 0x6b, 0x02, 0x83, 0x35, 0x2a, 0xb4, 0x08, 0x13, 0xad, 0xc0, 0x8f, 0xb8, 0x46, 0x78, 0x91, + 0x7b, 0x9c, 0xf6, 0x9b, 0xb1, 0x5b, 0x2a, 0x09, 0x3c, 0x4e, 0x95, 0x40, 0x2f, 0xc2, 0xb0, 0x88, + 0xe7, 0x52, 0xf1, 0xfd, 0x86, 0x50, 0x42, 0x29, 0x27, 0xcc, 0x6a, 0x8c, 0xc2, 0x3a, 0x9d, 0x56, + 0x8c, 0xa9, 0x99, 0x07, 0x33, 0x8b, 0x71, 0x55, 0xb3, 0x46, 0x97, 0x08, 0xf8, 0x3b, 0xd4, 0x53, + 0xc0, 0xdf, 0x58, 0x2d, 0x57, 0xea, 0xd9, 0xea, 0x09, 0x5d, 0x15, 0x59, 0x5f, 0xe9, 0x83, 0x29, + 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x91, 0x9e, 0x2e, 0x47, 0xa1, 0xb8, 0x7b, 0x77, 0xce, 0x1c, + 0xf7, 0x9c, 0xf9, 0x11, 0x0b, 0x4c, 0x49, 0x0d, 0xfd, 0x7f, 0xb9, 0x99, 0xc7, 0x5e, 0xcc, 0x95, + 0xfc, 0x94, 0xc3, 0xe1, 0xdb, 0xcc, 0x41, 0x66, 0xff, 0x3b, 0x0b, 0x1e, 0xed, 0xca, 0x11, 0x2d, + 0x41, 0x89, 0x89, 0x93, 0xda, 0x45, 0xef, 0x49, 0xe5, 0x91, 0x2e, 0x11, 0x39, 0xd2, 0x6d, 0x5c, + 0x12, 0x2d, 0xa5, 0x52, 0xbc, 0x3d, 0x95, 0x91, 0xe2, 0xed, 0xa4, 0xd1, 0x3d, 0xf7, 0x99, 0xe3, + 0xed, 0x87, 0xe8, 0x89, 0x63, 0xbc, 0x06, 0x43, 0xef, 0x37, 0x94, 0x8e, 0x76, 0x42, 0xe9, 0x88, + 0x4c, 0x6a, 0xed, 0x0c, 0xf9, 0x08, 0x4c, 0xb0, 0x40, 0x6f, 0xec, 0x7d, 0x84, 0x78, 0xa7, 0x56, + 0x88, 0x7d, 0xa0, 0xaf, 0x25, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x51, 0x11, 0x06, 0xf8, 0xf2, 0x3b, + 0x86, 0xeb, 0xe5, 0xd3, 0x50, 0x72, 0x9b, 0xcd, 0x36, 0xcf, 0xda, 0xd5, 0x1f, 0x7b, 0xd4, 0xae, + 0x48, 0x20, 0x8e, 0xf1, 0x68, 0x59, 0xe8, 0xbb, 0x3b, 0xc4, 0x92, 0xe5, 0x0d, 0x9f, 0x5d, 0x74, + 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, 0xe3, 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, + 0x16, 0x85, 0x89, 0x10, 0xd6, 0xef, 0xed, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51, + 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b, + 0xf9, 0x00, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda, + 0x76, 0x28, 0xe5, 0xd9, 0x2f, 0x5a, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, 0xe0, 0x2e, + 0x9c, 0x68, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, 0x8b, 0x33, + 0xeb, 0x40, 0x17, 0xe8, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xcf, 0xf2, 0x47, 0xf8, 0x6a, 0xe3, + 0x30, 0xac, 0xb0, 0xf6, 0xef, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x95, 0xec, 0xa9, 0xbd, 0xe9, 0x5b, + 0xd9, 0x76, 0x91, 0x2f, 0xb2, 0x90, 0x93, 0x2f, 0x52, 0xff, 0xb4, 0x62, 0xc7, 0x4f, 0xfb, 0xb2, + 0x05, 0x62, 0x86, 0x1c, 0x83, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, 0xc8, 0x51, + 0x64, 0xfc, 0x99, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0xa1, 0x97, 0xac, 0xf2, + 0x57, 0xc9, 0xde, 0xba, 0x5f, 0x71, 0xa2, 0xed, 0xec, 0x8f, 0x32, 0x06, 0xab, 0xaf, 0xe3, 0x60, + 0xd5, 0xe5, 0x02, 0x32, 0xd2, 0x29, 0x75, 0x79, 0x5c, 0x7f, 
0xd8, 0x74, 0x4a, 0xf6, 0x37, 0x2d, + 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, 0x5d, 0xbc, 0x35, 0x29, 0x0c, + 0xd6, 0xa8, 0x8e, 0xa4, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, 0x17, 0x87, 0xe8, 0xd1, 0x7f, + 0x31, 0x00, 0xc9, 0x17, 0x71, 0xe8, 0x26, 0x8c, 0xd4, 0x9c, 0x96, 0xb3, 0xe1, 0x36, 0xdc, 0xc8, + 0x25, 0x61, 0x27, 0x6f, 0xac, 0x05, 0x8d, 0x4e, 0x98, 0xc8, 0x35, 0x08, 0x36, 0xf8, 0xa0, 0x59, + 0x80, 0x56, 0xe0, 0xee, 0xba, 0x0d, 0xb2, 0xc5, 0xd4, 0x2e, 0x2c, 0x10, 0x08, 0x77, 0x0d, 0x93, + 0x50, 0xac, 0x51, 0x64, 0x84, 0x1f, 0x28, 0x3e, 0xe0, 0xf0, 0x03, 0x70, 0x6c, 0xe1, 0x07, 0xfa, + 0x0e, 0x15, 0x7e, 0x60, 0xe8, 0xd0, 0xe1, 0x07, 0xfa, 0x7b, 0x0a, 0x3f, 0x80, 0xe1, 0x94, 0x94, + 0x3d, 0xe9, 0xff, 0x65, 0xb7, 0x41, 0xc4, 0x85, 0x83, 0x47, 0x2f, 0x99, 0xb9, 0xb7, 0x5f, 0x3e, + 0x85, 0x33, 0x29, 0x70, 0x4e, 0x49, 0xf4, 0x51, 0x98, 0x76, 0x1a, 0x0d, 0xff, 0xb6, 0x1a, 0xd4, + 0xa5, 0xb0, 0xe6, 0x34, 0xb8, 0x09, 0x64, 0x90, 0x71, 0x7d, 0xe4, 0xde, 0x7e, 0x79, 0x7a, 0x2e, + 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x82, 0x52, 0x2b, 0xf0, 0x6b, 0xab, 0xda, 0xb3, 0xdd, 0xb3, + 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, 0x81, 0x1f, 0x17, 0xc8, 0x88, + 0x27, 0x30, 0x7c, 0xa4, 0xf1, 0x04, 0x76, 0x60, 0xaa, 0x4a, 0x02, 0xd7, 0x69, 0xb8, 0x77, 0xa9, + 0xbc, 0x2c, 0xf7, 0xa7, 0x75, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, 0xf8, 0xae, 0x5a, 0x5e, 0x1b, + 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x65, 0xc1, 0xa0, 0x78, 0x01, 0x77, 0x0c, 0x52, 0xe3, 0x9c, + 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x25, 0x61, 0x8e, 0x78, 0xb4, + 0x13, 0x93, 0xce, 0x86, 0x88, 0xbf, 0x5e, 0xa4, 0xd2, 0xbb, 0xf1, 0x16, 0xfb, 0xc1, 0x77, 0xc1, + 0x1a, 0x0c, 0x86, 0xe2, 0x2d, 0x70, 0x21, 0xff, 0x31, 0x46, 0x72, 0x10, 0x63, 0x2f, 0x3a, 0xf1, + 0xfa, 0x57, 0x32, 0xc9, 0x7c, 0x64, 0x5c, 0x7c, 0x80, 0x8f, 0x8c, 0xbb, 0xbd, 0x56, 0xef, 0x3b, + 0x8a, 0xd7, 0xea, 0xf6, 0xd7, 0xd8, 0xc9, 0xa9, 0xc3, 0x8f, 0x41, 0xa8, 0xba, 0x6c, 0x9e, 0xb1, + 0x76, 0x87, 0x99, 0x25, 0x1a, 0x95, 0x23, 0x5c, 0xfd, 0xbc, 0x05, 0x67, 0x32, 0xbe, 0x4a, 0x93, + 0xb4, 0x9e, 0x81, 0x21, 0xa7, 0x5d, 0x77, 0xd5, 0x5a, 0xd6, 0x4c, 0x93, 0x73, 0x02, 0x8e, 0x15, + 0x05, 0x5a, 0x80, 0x49, 0x72, 0xa7, 0xe5, 0x72, 0x43, 0xae, 0xee, 0x7c, 0x5c, 0xe4, 0xcf, 0x26, + 0x97, 0x92, 0x48, 0x9c, 0xa6, 0x57, 0x71, 0x8d, 0x8a, 0xb9, 0x71, 0x8d, 0xfe, 0x8e, 0x05, 0xc3, + 0xea, 0x35, 0xec, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, 0x87, 0xde, 0xce, 0xe9, 0xe6, + 0xdf, 0x29, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, 0xff, 0x87, 0x13, 0x97, 0x60, + 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0x68, 0xdd, 0x31, 0x18, 0xeb, 0x34, 0xea, 0x1d, + 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x51, 0x98, 0x70, 0xd8, 0xcd, + 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0x15, 0x2f, 0xba, 0x1e, + 0xf0, 0x2b, 0xa4, 0x16, 0x19, 0x4c, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xf9, 0x81, 0xd5, 0xd1, 0x6f, + 0xba, 0x52, 0xac, 0x09, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd8, 0xe9, 0xc3, 0xfa, 0xf4, 0x70, 0x51, + 0xb1, 0x7e, 0x6a, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0xb1, 0xb7, 0x3a, 0x6f, 0xf6, 0xb4, + 0x62, 0xfd, 0x41, 0x62, 0x1c, 0xa0, 0x0b, 0x7d, 0x3c, 0xe5, 0x1e, 0xf3, 0x6c, 0x97, 0x53, 0xe3, + 0x10, 0x0e, 0x31, 0x2c, 0x75, 0x0f, 0x4b, 0x6c, 0xb2, 0x52, 0x11, 0xeb, 0x42, 0x4b, 0xdd, 0x23, + 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, 0x84, 0xad, 0xa2, 0x0e, 0xb1, + 0x46, 0x81, 0x2e, 0x0a, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, 0x28, 0xc8, 0xee, 
0xd2, 0xb4, + 0x40, 0x97, 0x60, 0x58, 0x25, 0x6a, 0xaf, 0xf0, 0xa4, 0x59, 0x62, 0x9a, 0x2d, 0xc5, 0x60, 0xac, + 0xd3, 0xa0, 0x75, 0x18, 0x0f, 0xb9, 0x9e, 0x4d, 0xc5, 0x15, 0xe7, 0xfa, 0xca, 0xf7, 0xaa, 0x77, + 0xc8, 0x26, 0xfa, 0x80, 0x81, 0xf8, 0xee, 0x24, 0xa3, 0x33, 0x24, 0x59, 0xa0, 0x57, 0x61, 0xac, + 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0xd6, 0x3f, 0x43, 0x66, 0xbe, 0xdf, 0x6b, 0x06, + 0x16, 0x27, 0xa8, 0xa9, 0xf0, 0xa6, 0x43, 0x44, 0x74, 0x31, 0xc7, 0xdb, 0x22, 0xa1, 0x48, 0xbb, + 0xcd, 0x84, 0xb7, 0x6b, 0x39, 0x34, 0x38, 0xb7, 0x34, 0x7a, 0x09, 0x46, 0xe4, 0xe7, 0x6b, 0xc1, + 0x4c, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, 0x84, 0x93, 0xf2, 0xff, 0x7a, 0xe0, 0x6c, + 0x6e, 0xba, 0x35, 0xf1, 0xc2, 0x9f, 0x3f, 0xbb, 0xfd, 0xb0, 0x7c, 0x1b, 0xba, 0x94, 0x45, 0x74, + 0xb0, 0x5f, 0x7e, 0x44, 0xf4, 0x5a, 0x26, 0x1e, 0x67, 0xf3, 0x46, 0xab, 0x30, 0xb5, 0x4d, 0x9c, + 0x46, 0xb4, 0xbd, 0xb0, 0x4d, 0x6a, 0x3b, 0x72, 0xc1, 0xb1, 0xf0, 0x28, 0xda, 0xd3, 0x91, 0x2b, + 0x69, 0x12, 0x9c, 0x55, 0x0e, 0xbd, 0x09, 0xd3, 0xad, 0xf6, 0x46, 0xc3, 0x0d, 0xb7, 0xd7, 0xfc, + 0x88, 0x39, 0x21, 0xa9, 0x9c, 0xef, 0x22, 0x8e, 0x8a, 0x0a, 0x40, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, + 0x1c, 0xd0, 0x5d, 0x38, 0x99, 0x98, 0x08, 0x22, 0x92, 0xc4, 0x58, 0x7e, 0x56, 0x91, 0x6a, 0x56, + 0x01, 0x11, 0x94, 0x25, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, + 0x36, 0xe8, 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, + 0xfc, 0xdb, 0xc3, 0x1a, 0x35, 0xba, 0x06, 0x63, 0xe2, 0xdf, 0x9e, 0x18, 0x52, 0x1e, 0xd0, 0xe4, + 0x71, 0x16, 0x8d, 0xaa, 0xa2, 0x63, 0x0e, 0x52, 0x10, 0x9c, 0x28, 0x8b, 0xb6, 0xe0, 0x8c, 0xcc, + 0x10, 0xa7, 0xcf, 0x4f, 0x39, 0x06, 0x21, 0x4b, 0xe5, 0x31, 0xc4, 0x5f, 0xa5, 0xcc, 0x75, 0x22, + 0xc4, 0x9d, 0xf9, 0xd0, 0x73, 0x5d, 0x9f, 0xe6, 0xfc, 0xc9, 0xef, 0x49, 0xee, 0xe1, 0x44, 0xcf, + 0xf5, 0x6b, 0x49, 0x24, 0x4e, 0xd3, 0x23, 0x1f, 0x4e, 0xba, 0x5e, 0xd6, 0xac, 0x3e, 0xc5, 0x18, + 0x7d, 0x90, 0xbf, 0x76, 0xee, 0x3c, 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0xdb, 0xf3, 0xfb, 0xfb, + 0x5d, 0x8b, 0x96, 0xd6, 0xa4, 0x73, 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb3, + 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, + 0x26, 0xc0, 0xc5, 0xde, 0x24, 0x99, 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0x74, 0x0d, 0x86, + 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0xa5, 0x53, 0xd4, 0xc3, 0x05, 0x41, 0x23, 0xd6, 0x8f, 0xc8, + 0xca, 0xc1, 0x61, 0x58, 0x71, 0xb0, 0x7f, 0xa3, 0x00, 0xe5, 0x2e, 0x29, 0x5e, 0x12, 0x66, 0x28, + 0xab, 0x27, 0x33, 0xd4, 0x1c, 0x8c, 0xc7, 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x69, 0xa2, + 0x71, 0x92, 0xbe, 0xe7, 0x47, 0x09, 0xba, 0x25, 0xab, 0xaf, 0xeb, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, + 0xdf, 0xfb, 0xb5, 0x37, 0xd7, 0x1a, 0x69, 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xe7, 0x76, + 0xdc, 0x8d, 0x74, 0xc7, 0x1d, 0x81, 0x2d, 0xd7, 0xbe, 0x0e, 0x03, 0x3c, 0x8c, 0x63, 0x0f, 0xe2, + 0xf6, 0x63, 0x66, 0x70, 0x67, 0x25, 0xe1, 0x19, 0x01, 0x9e, 0x7f, 0xc0, 0x82, 0xf1, 0xc4, 0xeb, + 0x36, 0x84, 0xb5, 0x27, 0xd0, 0xf7, 0x23, 0x12, 0x67, 0x09, 0xdb, 0xe7, 0xa0, 0x6f, 0xdb, 0x0f, + 0xa3, 0xa4, 0xa3, 0xc7, 0x15, 0x3f, 0x8c, 0x30, 0xc3, 0xd8, 0xbf, 0x6f, 0x41, 0xff, 0xba, 0xe3, + 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, 0x81, 0x5e, 0xbe, 0x0b, 0xbd, 0x08, 0x03, 0x64, 0x73, + 0x93, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0xf4, 0xc4, 0xc0, 0x12, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, + 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x41, 0x29, 0x72, 0x9b, 0x64, 0xae, 0x5e, 0x17, 0xa6, 0xf2, 0xfb, + 0x08, 0x9f, 
0xb1, 0x2e, 0x19, 0xe0, 0x98, 0x97, 0xfd, 0x85, 0x02, 0x40, 0x1c, 0xff, 0xaa, 0xdb, + 0x27, 0xce, 0xa7, 0x8c, 0xa8, 0xe7, 0x33, 0x8c, 0xa8, 0x28, 0x66, 0x98, 0x61, 0x41, 0x55, 0xdd, + 0x54, 0xec, 0xa9, 0x9b, 0xfa, 0x0e, 0xd3, 0x4d, 0x0b, 0x30, 0x19, 0xc7, 0xef, 0x32, 0xc3, 0x17, + 0xb2, 0xa3, 0x73, 0x3d, 0x89, 0xc4, 0x69, 0x7a, 0x9b, 0xc0, 0x39, 0x15, 0xc6, 0x48, 0x9c, 0x68, + 0xcc, 0x0f, 0x5c, 0x37, 0x4a, 0x77, 0xe9, 0xa7, 0xd8, 0x4a, 0x5c, 0xc8, 0xb5, 0x12, 0xff, 0xa4, + 0x05, 0x27, 0x92, 0xf5, 0xb0, 0x47, 0xd3, 0x9f, 0xb7, 0xe0, 0x24, 0xb3, 0x95, 0xb3, 0x5a, 0xd3, + 0x96, 0xf9, 0x17, 0x3a, 0x86, 0x66, 0xca, 0x69, 0x71, 0x1c, 0xe3, 0x64, 0x35, 0x8b, 0x35, 0xce, + 0xae, 0xd1, 0xfe, 0x9f, 0x7d, 0x30, 0x9d, 0x17, 0xd3, 0x89, 0x3d, 0x13, 0x71, 0xee, 0x54, 0x77, + 0xc8, 0x6d, 0xe1, 0x8c, 0x1f, 0x3f, 0x13, 0xe1, 0x60, 0x2c, 0xf1, 0xc9, 0xac, 0x1d, 0x85, 0x1e, + 0xb3, 0x76, 0x6c, 0xc3, 0xe4, 0xed, 0x6d, 0xe2, 0xdd, 0xf0, 0x42, 0x27, 0x72, 0xc3, 0x4d, 0x97, + 0xd9, 0x95, 0xf9, 0xbc, 0x91, 0xa9, 0x7e, 0x27, 0x6f, 0x25, 0x09, 0x0e, 0xf6, 0xcb, 0x67, 0x0c, + 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xd2, 0x93, 0xbe, 0x07, 0x9c, 0xf4, 0xa4, + 0xe9, 0x0a, 0x6f, 0x14, 0xf9, 0x06, 0x80, 0xdd, 0x18, 0x57, 0x15, 0x14, 0x6b, 0x14, 0xe8, 0x13, + 0x80, 0xf4, 0xa4, 0x4e, 0x46, 0x48, 0xcd, 0x67, 0xef, 0xed, 0x97, 0xd1, 0x5a, 0x0a, 0x7b, 0xb0, + 0x5f, 0x9e, 0xa2, 0xd0, 0x15, 0x8f, 0xde, 0x3c, 0xe3, 0x38, 0x64, 0x19, 0x8c, 0xd0, 0x2d, 0x98, + 0xa0, 0x50, 0xb6, 0xa2, 0x64, 0xbc, 0x4e, 0x7e, 0x5b, 0x7c, 0xfa, 0xde, 0x7e, 0x79, 0x62, 0x2d, + 0x81, 0xcb, 0x63, 0x9d, 0x62, 0x82, 0x5e, 0x86, 0xb1, 0x78, 0x5e, 0x5d, 0x25, 0x7b, 0x3c, 0x3e, + 0x4e, 0x89, 0x2b, 0xbc, 0x57, 0x0d, 0x0c, 0x4e, 0x50, 0xda, 0x9f, 0xb7, 0xe0, 0x74, 0x6e, 0xe2, + 0x71, 0x74, 0x01, 0x86, 0x9c, 0x96, 0xcb, 0xcd, 0x17, 0xe2, 0xa8, 0x61, 0x6a, 0xb2, 0xca, 0x0a, + 0x37, 0x5e, 0x28, 0x2c, 0xdd, 0xe1, 0x77, 0x5c, 0xaf, 0x9e, 0xdc, 0xe1, 0xaf, 0xba, 0x5e, 0x1d, + 0x33, 0x8c, 0x3a, 0xb2, 0x8a, 0xb9, 0x4f, 0x11, 0xbe, 0x42, 0xd7, 0x6a, 0x46, 0x8a, 0xf2, 0xe3, + 0x6d, 0x06, 0x7a, 0x5a, 0x37, 0x35, 0x0a, 0xaf, 0xc2, 0x5c, 0x33, 0xe3, 0xf7, 0x5b, 0x20, 0x9e, + 0x2e, 0xf7, 0x70, 0x26, 0xbf, 0x01, 0x23, 0xbb, 0xe9, 0x84, 0x77, 0xe7, 0xf2, 0xdf, 0x72, 0x8b, + 0x40, 0xe1, 0x4a, 0xd0, 0x36, 0x92, 0xdb, 0x19, 0xbc, 0xec, 0x3a, 0x08, 0xec, 0x22, 0x61, 0x06, + 0x85, 0xee, 0xad, 0x79, 0x0e, 0xa0, 0xce, 0x68, 0x59, 0x16, 0xdc, 0x82, 0x29, 0x71, 0x2d, 0x2a, + 0x0c, 0xd6, 0xa8, 0xec, 0x7f, 0x55, 0x80, 0x61, 0x99, 0x60, 0xad, 0xed, 0xf5, 0xa2, 0xf6, 0x3b, + 0x54, 0xc6, 0x65, 0x74, 0x11, 0x4a, 0x4c, 0x2f, 0x5d, 0x89, 0xb5, 0xa5, 0x4a, 0x2b, 0xb4, 0x2a, + 0x11, 0x38, 0xa6, 0xa1, 0xbb, 0x63, 0xd8, 0xde, 0x60, 0xe4, 0x89, 0x87, 0xb6, 0x55, 0x0e, 0xc6, + 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, 0x2d, 0x6e, 0xcb, 0xea, 0x57, 0xd1, + 0x4b, 0x26, 0x56, 0x13, 0xb8, 0x83, 0xfd, 0xf2, 0x89, 0x24, 0x8c, 0x19, 0x69, 0x53, 0x5c, 0x98, + 0xcb, 0x1a, 0xaf, 0x84, 0xee, 0xea, 0x29, 0x4f, 0xb7, 0x18, 0x85, 0x75, 0x3a, 0xfb, 0x53, 0x80, + 0xd2, 0xa9, 0xe6, 0xd0, 0x6b, 0xdc, 0xe5, 0xd9, 0x0d, 0x48, 0xbd, 0x93, 0xd1, 0x56, 0x8f, 0xd1, + 0x21, 0xdf, 0xc8, 0xf1, 0x52, 0x58, 0x95, 0xb7, 0xff, 0x52, 0x11, 0x26, 0x92, 0x51, 0x01, 0xd0, + 0x15, 0x18, 0xe0, 0x22, 0xa5, 0x60, 0xdf, 0xc1, 0x27, 0x48, 0x8b, 0x25, 0xc0, 0x0e, 0x57, 0x21, + 0x95, 0x8a, 0xf2, 0xe8, 0x4d, 0x18, 0xae, 0xfb, 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9f, 0xab, 0xac, + 0x88, 0xe9, 0x9c, 0xa9, 0xa8, 0x58, 0x8c, 0xc9, 0xf4, 0xf8, 0x04, 0xcc, 0xfe, 0x1d, 0xa3, 0xb0, + 0xce, 0x0e, 0xad, 0xb3, 0xfc, 0x14, 
0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xea, 0xf4, 0xfe, 0x65, 0x41, + 0x12, 0x69, 0x9c, 0x47, 0x45, 0x12, 0x0b, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x19, 0x98, 0x0a, 0x73, + 0x4c, 0x27, 0x79, 0x99, 0x47, 0x3b, 0x59, 0x13, 0xe6, 0x1f, 0xba, 0xb7, 0x5f, 0x9e, 0xca, 0x32, + 0xb2, 0x64, 0x55, 0x63, 0x7f, 0xf1, 0x04, 0x18, 0x8b, 0xd8, 0x48, 0x44, 0x6d, 0x1d, 0x51, 0x22, + 0x6a, 0x0c, 0x43, 0xa4, 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0x8c, 0x49, 0x26, 0xcf, 0x25, 0x41, + 0x93, 0xe6, 0x29, 0x31, 0x58, 0xf1, 0xc9, 0xce, 0x16, 0x5e, 0xfc, 0x16, 0x66, 0x0b, 0xef, 0x3b, + 0xc6, 0x6c, 0xe1, 0x6b, 0x30, 0xb8, 0xe5, 0x46, 0x98, 0xb4, 0x7c, 0x71, 0x99, 0xcb, 0x9c, 0x87, + 0x97, 0x39, 0x49, 0x3a, 0x2f, 0xad, 0x40, 0x60, 0xc9, 0x04, 0xbd, 0xa6, 0x56, 0xe0, 0x40, 0xbe, + 0xc2, 0x25, 0xed, 0xbc, 0x92, 0xb9, 0x06, 0x45, 0x4e, 0xf0, 0xc1, 0xfb, 0xcd, 0x09, 0xbe, 0x2c, + 0x33, 0x79, 0x0f, 0xe5, 0x3f, 0x56, 0x63, 0x89, 0xba, 0xbb, 0xe4, 0xef, 0xbe, 0xa9, 0x67, 0x3f, + 0x2f, 0xe5, 0xef, 0x04, 0x2a, 0xb1, 0x79, 0x8f, 0x39, 0xcf, 0xbf, 0xdf, 0x82, 0x93, 0xc9, 0xec, + 0xa4, 0xec, 0x4d, 0x85, 0xf0, 0xf3, 0x78, 0xb1, 0x97, 0x74, 0xb1, 0xac, 0x80, 0x51, 0x21, 0xd3, + 0x91, 0x66, 0x92, 0xe1, 0xec, 0xea, 0x68, 0x47, 0x07, 0x1b, 0x75, 0xe1, 0x6f, 0xf0, 0x58, 0x4e, + 0xf2, 0xf4, 0x0e, 0x29, 0xd3, 0xd7, 0x33, 0x12, 0x75, 0x3f, 0x9e, 0x97, 0xa8, 0xbb, 0xe7, 0xf4, + 0xdc, 0xaf, 0xa9, 0xb4, 0xe9, 0xa3, 0xf9, 0x53, 0x89, 0x27, 0x45, 0xef, 0x9a, 0x2c, 0xfd, 0x35, + 0x95, 0x2c, 0xbd, 0x43, 0x44, 0x6e, 0x9e, 0x0a, 0xbd, 0x6b, 0x8a, 0x74, 0x2d, 0xcd, 0xf9, 0xf8, + 0xd1, 0xa4, 0x39, 0x37, 0x8e, 0x1a, 0x9e, 0x69, 0xfb, 0xe9, 0x2e, 0x47, 0x8d, 0xc1, 0xb7, 0xf3, + 0x61, 0xc3, 0x53, 0xba, 0x4f, 0xde, 0x57, 0x4a, 0xf7, 0x9b, 0x7a, 0x8a, 0x74, 0xd4, 0x25, 0x07, + 0x38, 0x25, 0xea, 0x31, 0x31, 0xfa, 0x4d, 0xfd, 0x00, 0x9c, 0xca, 0xe7, 0xab, 0xce, 0xb9, 0x34, + 0xdf, 0xcc, 0x23, 0x30, 0x95, 0x70, 0xfd, 0xc4, 0xf1, 0x24, 0x5c, 0x3f, 0x79, 0xe4, 0x09, 0xd7, + 0x4f, 0x1d, 0x43, 0xc2, 0xf5, 0x87, 0x8e, 0x31, 0xe1, 0xfa, 0x4d, 0xe6, 0x1c, 0xc5, 0x03, 0x40, + 0x89, 0x08, 0xe2, 0x4f, 0xe5, 0xc4, 0x4f, 0x4b, 0x47, 0x89, 0xe2, 0x1f, 0xa7, 0x50, 0x38, 0x66, + 0x95, 0x91, 0xc8, 0x7d, 0xfa, 0x01, 0x24, 0x72, 0x5f, 0x8b, 0x13, 0xb9, 0x9f, 0xce, 0x1f, 0xea, + 0x8c, 0xe7, 0x34, 0x39, 0xe9, 0xdb, 0x6f, 0xea, 0x69, 0xd7, 0x1f, 0xee, 0x60, 0x05, 0xcb, 0x52, + 0x28, 0x77, 0x48, 0xb6, 0xfe, 0x2a, 0x4f, 0xb6, 0xfe, 0x48, 0xfe, 0x4e, 0x9e, 0x3c, 0xee, 0x8c, + 0x14, 0xeb, 0xb4, 0x5d, 0x2a, 0xf6, 0x2a, 0x8b, 0x95, 0x9e, 0xd3, 0x2e, 0x15, 0xbc, 0x35, 0xdd, + 0x2e, 0x85, 0xc2, 0x31, 0x2b, 0xfb, 0x07, 0x0b, 0x70, 0xb6, 0xf3, 0x7a, 0x8b, 0xb5, 0xe4, 0x95, + 0xd8, 0x21, 0x20, 0xa1, 0x25, 0xe7, 0x77, 0xb6, 0x98, 0xaa, 0xe7, 0x78, 0x90, 0x97, 0x61, 0x52, + 0xbd, 0xc3, 0x69, 0xb8, 0xb5, 0xbd, 0xb5, 0xf8, 0x9a, 0xac, 0x22, 0x27, 0x54, 0x93, 0x04, 0x38, + 0x5d, 0x06, 0xcd, 0xc1, 0xb8, 0x01, 0x5c, 0x59, 0x14, 0x77, 0xb3, 0x38, 0x3a, 0xb7, 0x89, 0xc6, + 0x49, 0x7a, 0xfb, 0x4b, 0x16, 0x3c, 0x94, 0x93, 0xa9, 0xb4, 0xe7, 0x70, 0x87, 0x9b, 0x30, 0xde, + 0x32, 0x8b, 0x76, 0x89, 0xd0, 0x6a, 0xe4, 0x43, 0x55, 0x6d, 0x4d, 0x20, 0x70, 0x92, 0xa9, 0xfd, + 0xb3, 0x05, 0x38, 0xd3, 0xd1, 0xb1, 0x14, 0x61, 0x38, 0xb5, 0xd5, 0x0c, 0x9d, 0x85, 0x80, 0xd4, + 0x89, 0x17, 0xb9, 0x4e, 0xa3, 0xda, 0x22, 0x35, 0xcd, 0xce, 0xc1, 0x3c, 0x34, 0x2f, 0xaf, 0x56, + 0xe7, 0xd2, 0x14, 0x38, 0xa7, 0x24, 0x5a, 0x06, 0x94, 0xc6, 0x88, 0x11, 0x66, 0x51, 0xf7, 0xd3, + 0xfc, 0x70, 0x46, 0x09, 0xf4, 0x01, 0x18, 0x55, 0x0e, 0xab, 0xda, 0x88, 0xb3, 0x8d, 0x1d, 0xeb, + 0x08, 0x6c, 0xd2, 0xa1, 0x4b, 0x3c, 0x6d, 0x83, 0x48, 0xf0, 
0x21, 0x8c, 0x22, 0xe3, 0x32, 0x27, + 0x83, 0x00, 0x63, 0x9d, 0x66, 0xfe, 0xa5, 0xdf, 0xfc, 0xc6, 0xd9, 0xf7, 0xfc, 0xf6, 0x37, 0xce, + 0xbe, 0xe7, 0xf7, 0xbe, 0x71, 0xf6, 0x3d, 0xdf, 0x7d, 0xef, 0xac, 0xf5, 0x9b, 0xf7, 0xce, 0x5a, + 0xbf, 0x7d, 0xef, 0xac, 0xf5, 0x7b, 0xf7, 0xce, 0x5a, 0x7f, 0x70, 0xef, 0xac, 0xf5, 0x85, 0x3f, + 0x3c, 0xfb, 0x9e, 0x37, 0x50, 0x1c, 0x40, 0xf4, 0x22, 0x1d, 0x9d, 0x8b, 0xbb, 0x97, 0xfe, 0x5f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x6c, 0x51, 0x7f, 0x2c, 0x10, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8714,6 +8752,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -9022,6 +9076,39 @@ func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9231,6 +9318,47 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } if m.Started != nil { i-- if *m.Started { @@ -9977,6 +10105,22 @@ func (m *EphemeralContainerCommon) 
MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -15792,6 +15936,11 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Resize) + copy(dAtA[i:], m.Resize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize))) + i-- + dAtA[i] = 0x72 if len(m.EphemeralContainerStatuses) > 0 { for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { { @@ -20815,6 +20964,12 @@ func (m *Container) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -20851,6 +21006,19 @@ func (m *ContainerPort) Size() (n int) { return n } +func (m *ContainerResizePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -20940,6 +21108,19 @@ func (m *ContainerStatus) Size() (n int) { if m.Started != nil { n += 2 } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -21282,6 +21463,12 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23363,6 +23550,8 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Resize) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -25367,6 +25556,11 @@ func (this *Container) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25390,6 +25584,7 @@ func (this *Container) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -25419,6 +25614,17 @@ func (this *ContainerPort) String() string { }, 
"") return s } +func (this *ContainerResizePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResizePolicy{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -25472,6 +25678,16 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&ContainerStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, @@ -25482,6 +25698,8 @@ func (this *ContainerStatus) String() string { `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Started:` + valueToStringGenerated(this.Started) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -25713,6 +25931,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25736,6 +25959,7 @@ func (this *EphemeralContainerCommon) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -27323,6 +27547,7 @@ func (this *PodStatus) String() string { `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `PodIPs:` + repeatedStringForPodIPs + `,`, `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, `}`, }, "") return s @@ -33866,6 +34091,40 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34172,6 +34431,120 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -35060,6 +35433,171 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Started = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37706,6 +38244,40 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55879,6 +56451,38 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -65016,7 +65620,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -65274,7 +65878,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex default: diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 416811e29..8ef67ca40 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -220,7 +220,6 @@ message CSIPersistentVolumeSource { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -229,9 +228,10 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
+ // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -723,6 +723,12 @@ message Container { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -863,6 +869,17 @@ message ContainerPort { optional string hostIP = 5; } +// ContainerResizePolicy represents resource resize policy for the container. +message ContainerResizePolicy { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + optional string resourceName = 1; + + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + optional string restartPolicy = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -930,41 +947,76 @@ message ContainerStateWaiting { // ContainerStatus contains details for the current status of this container. message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. optional string name = 1; - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional optional ContainerState state = 2; - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional optional ContainerState lastState = 3; - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. optional bool ready = 4; - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. optional int32 restartCount = 5; - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. 
optional string imageID = 7; - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional optional string containerID = 8; - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional optional bool started = 9; + + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + map allocatedResources = 10; + + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + optional ResourceRequirements resources = 11; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1040,7 +1092,7 @@ message EmptyDirVolumeSource { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } @@ -1049,11 +1101,8 @@ message EmptyDirVolumeSource { // +structType=atomic message EndpointAddress { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -1089,10 +1138,17 @@ message EndpointPort { optional string protocol = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -1324,6 +1380,12 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -1791,7 +1853,8 @@ message HTTPGetAction { // HTTPHeader describes a custom header to be used in HTTP probes message HTTPHeader { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. optional string name = 1; // The header field value @@ -2484,6 +2547,10 @@ message NodeStatus { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2665,7 +2732,7 @@ message PersistentVolumeClaim { optional PersistentVolumeClaimStatus status = 3; } -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { optional string type = 1; @@ -3553,7 +3620,7 @@ message PodSpec { repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3808,14 +3875,19 @@ message PodSpec { optional bool hostUsers = 37; // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. 
- // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional repeated PodSchedulingGate schedulingGates = 38; // ResourceClaims defines which ResourceClaims must be allocated @@ -3923,13 +3995,20 @@ message PodStatus { // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + optional string resize = 14; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -4122,8 +4201,6 @@ message ProbeHandler { optional TCPSocketAction tcpSocket = 3; // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional optional GRPCAction grpc = 4; } @@ -4373,6 +4450,7 @@ message ReplicationControllerSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; @@ -4501,7 +4579,7 @@ message ResourceRequirements { // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional map requests = 2; @@ -5628,8 +5706,12 @@ message TopologySpreadConstraint { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
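
Editor's note (not part of the vendored diff): the PodStatus.resize field and the ContainerStatus allocatedResources/resources fields documented in the hunks above are new in this API bump. The following is a minimal, hedged sketch of how a consumer built against k8s.io/api v0.27.x might read them; the function name reportResize is illustrative, the pod value is assumed to come from an ordinary client-go Get/List call, and the InPlacePodVerticalScaling feature gate is assumed to be enabled on the cluster.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// reportResize prints the pending-resize state of a pod using the fields
// introduced by this vendored API version. PodResizeStatusProposed is the
// value the apiserver sets when a container resource change is requested.
func reportResize(pod *corev1.Pod) {
	if pod.Status.Resize == corev1.PodResizeStatusProposed {
		fmt.Printf("pod %s: resize proposed, awaiting kubelet actuation\n", pod.Name)
	}
	for _, cs := range pod.Status.ContainerStatuses {
		// AllocatedResources is what the node admitted; Resources is what has
		// actually been enacted on the running container (nil until set).
		fmt.Printf("container %s allocated=%v\n", cs.Name, cs.AllocatedResources)
		if cs.Resources != nil {
			fmt.Printf("container %s enacted requests=%v\n", cs.Name, cs.Resources.Requests)
		}
	}
}

func main() {
	// Placeholder pod; in practice this would be fetched from the API server.
	reportResize(&corev1.Pod{})
}
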
// +listType=atomic // +optional repeated string matchLabelKeys = 8; diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go index 9341abf89..e803d518b 100644 --- a/vendor/k8s.io/api/core/v1/toleration.go +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, // -// otherwise taint effect must equal to toleration.effect. -// -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// 1. Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 0101e95d9..c831d5961 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -577,7 +577,7 @@ const ( PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" ) -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` @@ -735,7 +735,7 @@ type EmptyDirVolumeSource struct { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } @@ -1826,7 +1826,6 @@ type CSIPersistentVolumeSource struct { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -1835,9 +1834,10 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. 
+ // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2137,7 +2137,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value Value string `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -2264,6 +2265,33 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// ResourceResizeRestartPolicy specifies how to handle container resource resize. +type ResourceResizeRestartPolicy string + +// These are the valid resource resize restart policy values: +const ( + // 'NotRequired' means Kubernetes will try to resize the container + // without restarting it, if possible. Kubernetes may however choose to + // restart the container if it is unable to actuate resize without a + // restart. For e.g. the runtime doesn't support restart-free resizing. + NotRequired ResourceResizeRestartPolicy = "NotRequired" + // 'RestartContainer' means Kubernetes will resize the container in-place + // by stopping and starting the container when new resources are applied. + // This is needed for legacy applications. For e.g. java apps using the + // -xmxN flag which are unable to use resized memory without restarting. + RestartContainer ResourceResizeRestartPolicy = "RestartContainer" +) + +// ContainerResizePolicy represents resource resize policy for the container. +type ContainerResizePolicy struct { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"` + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"` +} + // PreemptionPolicy describes a policy for if/when to preempt a pod. // +enum type PreemptionPolicy string @@ -2310,7 +2338,7 @@ type ResourceRequirements struct { Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. 
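
Editor's note (not part of the vendored diff): the ContainerResizePolicy type and the NotRequired/RestartContainer restart-policy constants added in the types.go hunk above are what callers populate on Container.ResizePolicy. Below is a minimal sketch, under the assumption that the InPlacePodVerticalScaling feature gate is enabled; the container name and image reference are hypothetical and only illustrate how the new field composes with the existing ResourceRequirements.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	c := corev1.Container{
		Name:  "app",
		Image: "registry.example.com/app:latest", // hypothetical image reference
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		// New in this API version: per-resource behavior for in-place resizes.
		// CPU can be resized without a restart; memory changes restart the container.
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
	}
	fmt.Printf("%s has %d resize policies\n", c.Name, len(c.ResizePolicy))
}
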
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` @@ -2413,6 +2441,11 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -2517,8 +2550,6 @@ type ProbeHandler struct { TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2632,33 +2663,66 @@ type ContainerState struct { // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. 
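
A minimal sketch of a container using the new resizePolicy field together with the Requests/Limits rules documented above; the image name and sizes are hypothetical, and the field only takes effect when the InPlacePodVerticalScaling feature gate is enabled:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	c := corev1.Container{
		Name:  "app",
		Image: "registry.example.com/app:latest", // hypothetical image
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
		// Resize CPU in place; restart the container when memory changes,
		// mirroring the NotRequired / RestartContainer semantics above.
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
	}
	fmt.Println(len(c.ResizePolicy))
}
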
 	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
-	// ImageID of the container's image.
+	// ImageID is the image ID of the container's image. The image ID may not
+	// match the image ID of the image used in the PodSpec, as it may have been
+	// resolved by the runtime.
 	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
-	// Container's ID in the format '<type>://<container_id>'.
+	// ContainerID is the ID of the container in the format '<type>://<container_id>'.
+	// Where type is a container runtime identifier, returned from Version call of CRI API
+	// (for example "containerd").
 	// +optional
 	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
-	// Specifies whether the container has passed its startup probe.
-	// Initialized as false, becomes true after startupProbe is considered successful.
-	// Resets to false when the container is restarted, or if kubelet loses state temporarily.
-	// Is always true when no startupProbe is defined.
+	// Started indicates whether the container has finished its postStart lifecycle hook
+	// and passed its startup probe.
+	// Initialized as false, becomes true after startupProbe is considered
+	// successful. Resets to false when the container is restarted, or if kubelet
+	// loses state temporarily. In both cases, startup probes will run again.
+	// Is always true when no startupProbe is defined and container is running and
+	// has passed the postStart lifecycle hook. The null value must be treated the
+	// same as false.
 	// +optional
 	Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
+	// AllocatedResources represents the compute resources allocated for this container by the
+	// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
+	// and after successfully admitting desired pod resize.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
+	// Resources represents the compute resource requests and limits that have been successfully
+	// enacted on the running container after it has been started or has been successfully resized.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"`
 }
 
 // PodPhase is a label for the condition of a pod at the current time.
@@ -2722,6 +2786,10 @@ const (
 	// TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
 	// is initiated by kubelet
 	PodReasonTerminationByKubelet = "TerminationByKubelet"
+
+	// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
+	// disruption was initiated by scheduler's preemption.
+	PodReasonPreemptionByScheduler = "PreemptionByScheduler"
 )
 
 // PodCondition contains details for the current condition of this pod.
@@ -2747,6 +2815,20 @@ type PodCondition struct {
 	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
 }
 
+// PodResizeStatus shows status of desired resize of a pod's containers.
+type PodResizeStatus string
+
+const (
+	// Pod resources resize has been requested and will be evaluated by node.
+	PodResizeStatusProposed PodResizeStatus = "Proposed"
+	// Pod resources resize has been accepted by node and is being actuated.
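
A small sketch of reading the reworded ContainerStatus fields, including the new allocatedResources and resources fields (populated only when InPlacePodVerticalScaling is enabled); the helper name is made up for illustration:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// describeStatus is a hypothetical helper over the fields documented above.
func describeStatus(cs corev1.ContainerStatus) {
	started := cs.Started != nil && *cs.Started // a nil Started must be treated as false
	fmt.Printf("%s: ready=%v started=%v restarts=%d image=%s\n",
		cs.Name, cs.Ready, started, cs.RestartCount, cs.Image)
	if cs.AllocatedResources != nil {
		fmt.Printf("  allocated by node: %v\n", cs.AllocatedResources)
	}
	if cs.Resources != nil {
		fmt.Printf("  enacted requests: %v\n", cs.Resources.Requests)
	}
}

func main() {
	describeStatus(corev1.ContainerStatus{Name: "app", Ready: true, Image: "app:latest"})
}
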
+ PodResizeStatusInProgress PodResizeStatus = "InProgress" + // Node cannot resize the pod at this time and will keep retrying. + PodResizeStatusDeferred PodResizeStatus = "Deferred" + // Requested pod resize is not feasible and will not be re-evaluated. + PodResizeStatusInfeasible PodResizeStatus = "Infeasible" +) + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one @@ -3157,7 +3239,7 @@ type PodSpec struct { // +patchStrategy=merge EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3383,14 +3465,19 @@ type PodSpec struct { HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. - // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` // ResourceClaims defines which ResourceClaims must be allocated // and reserved before the Pod is allowed to start. The resources @@ -3611,8 +3698,12 @@ type TopologySpreadConstraint struct { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"` @@ -3880,6 +3971,11 @@ type EphemeralContainerCommon struct { // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. 
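
The schedulingGates documentation above (now beta) can be exercised as in this sketch; the gate name and pod are hypothetical, and an external controller is expected to remove the gate later to release the pod to the scheduler:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "gated-pod"}, // hypothetical name
		Spec: corev1.PodSpec{
			// While any gate is present the pod stays SchedulingGated and
			// the scheduler will not attempt to schedule it.
			SchedulingGates: []corev1.PodSchedulingGate{{Name: "example.com/wait-for-quota"}},
			Containers:      []corev1.Container{{Name: "app", Image: "app:latest"}},
		},
	}
	fmt.Println(len(pod.Spec.SchedulingGates) > 0) // true: pod will not be scheduled yet
}
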
+ // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -4065,12 +4161,19 @@ type PodStatus struct { ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. // +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4209,6 +4312,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` @@ -4369,34 +4473,47 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes how nodes distribute service traffic they +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they // receive on the ClusterIP. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same // node as the client pod (dropping the traffic if there are no local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// ServiceExternalTrafficPolicyType describes how nodes distribute service traffic they +// for backwards compat +// +enum +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, -// and LoadBalancer IPs). +// and LoadBalancer IPs. 
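
A sketch of how a client might interpret the new PodStatus.resize field using the PodResizeStatus values defined above; the helper is illustrative, not an API from the vendored packages:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// resizeMessage is a hypothetical helper translating the resize status values
// defined above into human-readable text.
func resizeMessage(s corev1.PodResizeStatus) string {
	switch s {
	case corev1.PodResizeStatusProposed:
		return "resize requested, waiting for the node to evaluate it"
	case corev1.PodResizeStatusInProgress:
		return "node accepted the resize and is actuating it"
	case corev1.PodResizeStatusDeferred:
		return "node cannot resize right now, will keep retrying"
	case corev1.PodResizeStatusInfeasible:
		return "requested resize is not feasible"
	default:
		return "no resize pending"
	}
}

func main() {
	var status corev1.PodStatus
	status.Resize = corev1.PodResizeStatusProposed
	fmt.Println(resizeMessage(status.Resize))
}
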
// +enum -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by // routing only to endpoints on the same node as the traffic was received on // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy + +const ( + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) // These are the valid conditions of a service. @@ -4404,6 +4521,9 @@ const ( // LoadBalancerPortsError represents the condition of the requested ports // on the cloud load balancer instance. LoadBalancerPortsError = "LoadBalancerPortsError" + // LoadBalancerPortsErrorReason reason in ServiceStatus condition LoadBalancerPortsError + // means the LoadBalancer was not able to be configured correctly. + LoadBalancerPortsErrorReason = "LoadBalancerMixedProtocolNotSupported" ) // ServiceStatus represents the current status of a service. @@ -4629,7 +4749,7 @@ type ServiceSpec struct { // a NodePort from within the cluster may need to take traffic policy into account // when picking a node. // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4726,7 +4846,7 @@ type ServiceSpec struct { // "Cluster", uses the standard behavior of routing to all endpoints evenly // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` } // ServicePort contains information on service's port. @@ -4947,11 +5067,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). 
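
With the renames above, new code can use the shorter ServiceExternalTrafficPolicy and ServiceInternalTrafficPolicy names while the old identifiers keep compiling through the compatibility aliases; a minimal sketch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	local := corev1.ServiceInternalTrafficPolicyLocal
	spec := corev1.ServiceSpec{
		Type:                  corev1.ServiceTypeLoadBalancer,
		ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal,
		InternalTrafficPolicy: &local,
	}
	// The old *Type identifiers remain usable through the aliases added above.
	var legacy corev1.ServiceExternalTrafficPolicyType = corev1.ServiceExternalTrafficPolicyTypeCluster
	fmt.Println(spec.ExternalTrafficPolicy, legacy)
}
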
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -4984,10 +5101,17 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` @@ -5206,6 +5330,10 @@ type NodeStatus struct { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -6760,6 +6888,13 @@ const ( PortForwardRequestIDHeader = "requestID" ) +const ( + // MixedProtocolNotSupported error in PortStatus means that the cloud provider + // can't publish the port on the load balancer because mixed values of protocols + // on the same LoadBalancer type of Service are not supported by the cloud provider. + MixedProtocolNotSupported = "MixedProtocolNotSupported" +) + // PortStatus represents the error condition of a service port type PortStatus struct { diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 99391a423..a01ae3717 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AWSElasticBlockStoreVolumeSource = map[string]string{ @@ -126,8 +126,8 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
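
A small sketch of the appProtocol hint documented above, using the Kubernetes-defined 'kubernetes.io/h2c' value on a core/v1 EndpointPort; the port name and number are hypothetical:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	h2c := "kubernetes.io/h2c" // Kubernetes-defined prefixed name from the comment above
	port := corev1.EndpointPort{
		Name:        "grpc",
		Port:        9090,
		Protocol:    corev1.ProtocolTCP,
		AppProtocol: &h2c,
	}
	fmt.Printf("%s/%d speaks %s\n", port.Name, port.Port, *port.AppProtocol)
}
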
If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an beta field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is an alpha field and requires enabling CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -346,6 +346,7 @@ var map_Container = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -388,6 +389,16 @@ func (ContainerPort) SwaggerDoc() map[string]string { return map_ContainerPort } +var map_ContainerResizePolicy = map[string]string{ + "": "ContainerResizePolicy represents resource resize policy for the container.", + "resourceName": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "restartPolicy": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", +} + +func (ContainerResizePolicy) SwaggerDoc() map[string]string { + return map_ContainerResizePolicy +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -434,16 +445,18 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format '://'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. 
More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -493,7 +506,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", "medium": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -502,7 +515,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. 
May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -517,7 +530,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -609,6 +622,7 @@ var map_EphemeralContainerCommon = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -818,7 +832,7 @@ func (HTTPGetAction) SwaggerDoc() map[string]string { var map_HTTPHeader = map[string]string{ "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", + "name": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "value": "The header field value", } @@ -1213,7 +1227,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1292,7 +1306,7 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimCondition = map[string]string{ - "": "PersistentVolumeClaimCondition contails details about state of pvc", + "": "PersistentVolumeClaimCondition contains details about state of pvc", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", @@ -1668,7 +1682,7 @@ var map_PodSpec = map[string]string{ "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", "ephemeralContainers": "List of ephemeral containers run in this pod. 
Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", @@ -1701,7 +1715,7 @@ var map_PodSpec = map[string]string{ "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", "os": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", - "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.\n\nThis is an alpha-level feature enabled by PodSchedulingReadiness feature gate.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } @@ -1722,8 +1736,9 @@ var map_PodStatus = map[string]string{ "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1841,7 +1856,7 @@ var map_ProbeHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.", + "grpc": "GRPC specifies an action involving a GRPC port.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1954,7 +1969,7 @@ var map_ReplicationControllerSpec = map[string]string{ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -2040,7 +2055,7 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } @@ -2450,7 +2465,7 @@ var map_TopologySpreadConstraint = map[string]string{ "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. 
A null or empty list means only match against labelSelector.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } func (TopologySpreadConstraint) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 2bf1c8ad6..bfb7e0bff 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -788,6 +788,11 @@ func (in *Container) DeepCopyInto(out *Container) { } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -875,6 +880,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. +func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { + if in == nil { + return nil + } + out := new(ContainerResizePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
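
A minimal sketch of a topology spread constraint using matchLabelKeys as described above; the "app" label and the pod-template-hash key are only examples of keys expected to be present on the incoming pod:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	tsc := corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
		// labelSelector picks the app; matchLabelKeys narrows spreading to pods
		// from the same revision without repeating that label's value here.
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"}, // hypothetical label
		},
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(tsc.TopologyKey, tsc.MatchLabelKeys)
}
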
func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -967,6 +988,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(bool) **out = **in } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1382,6 +1415,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -5517,7 +5555,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 9cbe46394..b7150ef2c 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -86,7 +86,9 @@ message EndpointConditions { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional optional bool ready = 1; @@ -115,9 +117,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -125,21 +126,28 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -183,7 +191,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 2df80c3d5..9b4daafca 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -29,9 +29,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +42,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +65,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -77,8 +83,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -86,6 +94,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional @@ -104,9 +113,11 @@ type Endpoint struct { // be used to determine endpoints local to a Node. 
// +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. // +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -119,7 +130,9 @@ type EndpointConditions struct { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` @@ -154,28 +167,37 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` @@ -186,9 +208,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
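The expanded appProtocol documentation above now enumerates the accepted forms: un-prefixed IANA service names, the Kubernetes-defined 'kubernetes.io/h2c' prefix for HTTP/2 over cleartext, and implementation-defined prefixed names. A short sketch of populating the field as documented, assuming the vendored discovery/v1 and core/v1 packages are importable (the pointer helpers are local, not part of the API packages):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
)

// Local helpers, not from the API packages.
func strPtr(s string) *string                         { return &s }
func int32Ptr(i int32) *int32                         { return &i }
func protocolPtr(p corev1.Protocol) *corev1.Protocol  { return &p }

func main() {
	port := discoveryv1.EndpointPort{
		Name:     strPtr("grpc"),
		Protocol: protocolPtr(corev1.ProtocolTCP),
		Port:     int32Ptr(8080),
		// Kubernetes-defined prefixed name for HTTP/2 over cleartext,
		// as listed in the updated appProtocol documentation above.
		AppProtocol: strPtr("kubernetes.io/h2c"),
	}
	fmt.Printf("%s/%d appProtocol=%s\n", *port.Name, *port.Port, *port.AppProtocol)
}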
// +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 746408b66..c780c9573 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -45,7 +45,7 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 2979e64a7..8b6c360b0 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -118,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -128,17 +127,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. 
// This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -186,7 +185,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 7a02bead5..f09f7f320 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -80,8 +86,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -89,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -108,10 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. 
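The topology deprecation above is why the v1 Endpoint earlier in this diff carries dedicated nodeName and zone fields: what v1beta1 conveys through free-form topology keys is structured in v1. A hedged sketch of reading the zone from either version; the 'topology.kubernetes.io/zone' key is the conventional one and is assumed here rather than taken from this diff:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
)

// zoneOf prefers the structured v1 field and falls back to the v1beta1
// topology map when only the older object is available.
func zoneOf(v1ep *discoveryv1.Endpoint, betaEp *discoveryv1beta1.Endpoint) string {
	if v1ep != nil && v1ep.Zone != nil {
		return *v1ep.Zone
	}
	if betaEp != nil {
		// Conventional key; an assumption, not defined in this diff.
		return betaEp.Topology["topology.kubernetes.io/zone"]
	}
	return ""
}

func main() {
	zone := "us-east-1a"
	v1ep := &discoveryv1.Endpoint{Addresses: []string{"10.0.0.7"}, Zone: &zone}
	betaEp := &discoveryv1beta1.Endpoint{
		Addresses: []string{"10.0.0.7"},
		Topology:  map[string]string{"topology.kubernetes.io/zone": zone},
	}
	fmt.Println(zoneOf(v1ep, nil), zoneOf(nil, betaEp))
}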
// +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -159,24 +171,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -195,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index e1c974b39..b1d4c306c 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. 
All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63bb..44ac0c3bb 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
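The types_swagger_doc_generated.go files touched above expose the field documentation at runtime through per-type SwaggerDoc() maps, so the reworded strings are what API documentation tooling will surface. A minimal sketch of reading one of the updated entries, assuming the vendored discovery/v1 package is importable:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
)

func main() {
	// SwaggerDoc() returns the field-name-to-doc-string map generated above;
	// this prints the updated appProtocol description.
	docs := discoveryv1.EndpointPort{}.SwaggerDoc()
	fmt.Println(docs["appProtocol"])
}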
var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a83..e6c28a4f8 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 333142b3e..863ebbc4a 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m 
*AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_cdc93917efc28165, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_cdc93917efc28165, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_cdc93917efc28165, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_cdc93917efc28165, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_cdc93917efc28165, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_cdc93917efc28165, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_cdc93917efc28165, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_cdc93917efc28165, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ 
var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_cdc93917efc28165, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_cdc93917efc28165, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_cdc93917efc28165, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_cdc93917efc28165, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_cdc93917efc28165, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_cdc93917efc28165, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_cdc93917efc28165, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_cdc93917efc28165, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_cdc93917efc28165, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_cdc93917efc28165, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, 
[]int{24} + return fileDescriptor_cdc93917efc28165, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +584,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_cdc93917efc28165, []int{19} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +612,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_cdc93917efc28165, []int{20} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +640,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_cdc93917efc28165, []int{21} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +668,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_cdc93917efc28165, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_cdc93917efc28165, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_cdc93917efc28165, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_cdc93917efc28165, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_cdc93917efc28165, []int{26} } func (m *IngressTLS) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_cdc93917efc28165, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_cdc93917efc28165, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return fileDescriptor_cdc93917efc28165, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_cdc93917efc28165, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_cdc93917efc28165, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1004,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } func (*NetworkPolicyStatus) ProtoMessage() {} func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_cdc93917efc28165, []int{34} } func (m 
*NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,94 +1029,10 @@ func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{35} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1060,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{36} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1172,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1200,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1505,15 +1253,15 @@ func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} } -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m 
*RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1521,27 +1269,27 @@ func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([ } return b[:n], nil } -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) } -func (m *RunAsGroupStrategyOptions) XXX_Size() int { +func (m *Scale) XXX_Size() int { return m.Size() } -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} } -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1549,126 +1297,14 @@ func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([] } return b[:n], nil } -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) } -func (m *RunAsUserStrategyOptions) XXX_Size() int { +func (m *ScaleSpec) XXX_Size() int { return m.Size() } -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = 
SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{56} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{57} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) } var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo @@ -1676,7 +1312,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{58} + return fileDescriptor_cdc93917efc28165, []int{45} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1701,38 +1337,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{59} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1747,11 +1352,8 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") @@ -1772,9 +1374,6 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1783,15 +1382,10 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - 
proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -1799,344 +1393,188 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3920 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5c, 0x4b, 0x6c, 0x1c, 0xd9, - 0x75, 0x55, 0x75, 0x93, 0xec, 0xe6, 0xa5, 0xf8, 0x7b, 0xa4, 0xc8, 0x1e, 0xca, 0x62, 0xcb, 0x35, - 0xc8, 0x44, 0x33, 0xd1, 0x74, 0x5b, 0x1c, 0x49, 0x1e, 0x8f, 0x10, 0x7b, 0xd8, 0xfc, 0x48, 0xb4, - 0xf9, 0xe9, 0x79, 0x4d, 0xca, 0xc6, 0x20, 0xe3, 0xb8, 0x58, 0xfd, 0xd8, 0xac, 0x61, 0x75, 0x55, - 0xa5, 0xaa, 0x9a, 0x66, 0x07, 0x59, 0x24, 0x48, 0x36, 0x06, 0x02, 0x24, 0x1b, 0x27, 0x59, 0x66, - 0x60, 0x20, 0xbb, 0x20, 0xcb, 0x64, 0xe1, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0x06, 0xb2, - 0x88, 0x57, 0x44, 0x86, 0x5e, 0x05, 0x59, 0x65, 0x17, 0x68, 0x15, 0xbc, 0x4f, 0xfd, 0xab, 0xd8, - 0xd5, 0x8c, 0x44, 0xc4, 0x81, 0x57, 0x62, 0xbd, 0x7b, 0xef, 0x79, 0xbf, 0xfb, 0xee, 0x3d, 0xef, - 0xd3, 0x82, 0xcd, 0x93, 0xf7, 0x9d, 0x9a, 0x66, 0xd6, 0x4f, 0x7a, 0x87, 0xc4, 0x36, 0x88, 0x4b, - 0x9c, 0xfa, 0x29, 0x31, 0xda, 0xa6, 0x5d, 0x17, 0x02, 0xc5, 0xd2, 0xea, 0xe4, 0xcc, 0x25, 0x86, - 0xa3, 0x99, 0x86, 0x53, 0x3f, 0x7d, 0x70, 0x48, 0x5c, 0xe5, 0x41, 0xbd, 0x43, 0x0c, 0x62, 0x2b, - 0x2e, 0x69, 0xd7, 0x2c, 0xdb, 0x74, 0x4d, 0x74, 0x87, 0xab, 0xd7, 0x14, 0x4b, 0xab, 0x05, 0xea, - 0x35, 0xa1, 0xbe, 0xf4, 0x6e, 0x47, 0x73, 0x8f, 0x7b, 0x87, 0x35, 0xd5, 0xec, 0xd6, 0x3b, 0x66, - 0xc7, 0xac, 0x33, 0xab, 0xc3, 0xde, 0x11, 0xfb, 0x62, 0x1f, 0xec, 0x2f, 0x8e, 0xb6, 0x24, 0x87, - 0x2a, 0x57, 0x4d, 0x9b, 0xd4, 0x4f, 0x13, 0x35, 0x2e, 0x3d, 0x0c, 0x74, 0xba, 0x8a, 0x7a, 0xac, - 0x19, 0xc4, 0xee, 0xd7, 0xad, 0x93, 0x0e, 0x2d, 0x70, 0xea, 0x5d, 0xe2, 0x2a, 0x69, 0x56, 0xf5, - 0x2c, 0x2b, 0xbb, 0x67, 0xb8, 0x5a, 0x97, 0x24, 0x0c, 0x1e, 0x0f, 0x32, 0x70, 0xd4, 0x63, 0xd2, - 0x55, 0x12, 0x76, 0xef, 0x65, 0xd9, 0xf5, 0x5c, 0x4d, 0xaf, 0x6b, 0x86, 0xeb, 0xb8, 0x76, 0xdc, - 0x48, 0x7e, 0x08, 0x33, 0xab, 0xba, 0x6e, 0x7e, 0x97, 0xb4, 0xd7, 0x5a, 0x5b, 0xeb, 0xb6, 0x76, - 0x4a, 0x6c, 0x74, 0x17, 0x46, 0x0c, 0xa5, 0x4b, 0x2a, 0xd2, 0x5d, 0xe9, 0xde, 0x78, 0xe3, 0xe6, - 0x8b, 0xf3, 0xea, 0x8d, 0x8b, 0xf3, 0xea, 0xc8, 0xae, 0xd2, 0x25, 0x98, 0x49, 0xe4, 0x27, 0x30, - 0x2b, 0xac, 0x36, 0x75, 0x72, 0xf6, 0xdc, 0xd4, 0x7b, 0x5d, 0x82, 0xde, 0x82, 0xb1, 0x36, 0x03, - 0x10, 0x86, 0x53, 0xc2, 0x70, 0x8c, 0xc3, 0x62, 0x21, 0x95, 0x1d, 0x98, 0x16, 0xc6, 0xcf, 0x4c, - 0xc7, 0x6d, 0x2a, 0xee, 0x31, 0x5a, 0x01, 0xb0, 0x14, 0xf7, 0xb8, 0x69, 0x93, 0x23, 0xed, 0x4c, - 0x98, 0x23, 0x61, 0x0e, 0x4d, 0x5f, 0x82, 0x43, 0x5a, 0xe8, 0x3e, 0x94, 0x6d, 0xa2, 0xb4, 0xf7, - 0x0c, 0xbd, 0x5f, 0x29, 0xdc, 0x95, 0xee, 0x95, 0x1b, 0x33, 0xc2, 0xa2, 0x8c, 0x45, 0x39, 0xf6, - 0x35, 0xe4, 0xef, 0x17, 0x60, 0x7c, 
0x5d, 0x21, 0x5d, 0xd3, 0x68, 0x11, 0x17, 0x7d, 0x07, 0xca, - 0x74, 0xba, 0xda, 0x8a, 0xab, 0xb0, 0xda, 0x26, 0x56, 0xbe, 0x54, 0x0b, 0xdc, 0xc9, 0x1f, 0xbd, - 0x9a, 0x75, 0xd2, 0xa1, 0x05, 0x4e, 0x8d, 0x6a, 0xd7, 0x4e, 0x1f, 0xd4, 0xf6, 0x0e, 0x3f, 0x25, - 0xaa, 0xbb, 0x43, 0x5c, 0x25, 0x68, 0x5f, 0x50, 0x86, 0x7d, 0x54, 0xb4, 0x0b, 0x23, 0x8e, 0x45, - 0x54, 0xd6, 0xb2, 0x89, 0x95, 0xfb, 0xb5, 0x4b, 0x9d, 0xb5, 0xe6, 0xb7, 0xac, 0x65, 0x11, 0x35, - 0x18, 0x71, 0xfa, 0x85, 0x19, 0x0e, 0x7a, 0x0e, 0x63, 0x8e, 0xab, 0xb8, 0x3d, 0xa7, 0x52, 0x64, - 0x88, 0xb5, 0xdc, 0x88, 0xcc, 0x2a, 0x98, 0x0c, 0xfe, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x05, 0x40, - 0xbe, 0xee, 0x9a, 0x69, 0xb4, 0x35, 0x57, 0x33, 0x0d, 0xf4, 0x01, 0x8c, 0xb8, 0x7d, 0xcb, 0x73, - 0x81, 0xb7, 0xbc, 0x06, 0xed, 0xf7, 0x2d, 0xf2, 0xf2, 0xbc, 0xba, 0x90, 0xb4, 0xa0, 0x12, 0xcc, - 0x6c, 0xd0, 0xb6, 0xdf, 0xd4, 0x02, 0xb3, 0x7e, 0x18, 0xad, 0xfa, 0xe5, 0x79, 0x35, 0x65, 0xb1, - 0xd5, 0x7c, 0xa4, 0x68, 0x03, 0xd1, 0x29, 0x20, 0x5d, 0x71, 0xdc, 0x7d, 0x5b, 0x31, 0x1c, 0x5e, - 0x93, 0xd6, 0x25, 0x62, 0x10, 0xde, 0xc9, 0x37, 0x69, 0xd4, 0xa2, 0xb1, 0x24, 0x5a, 0x81, 0xb6, - 0x13, 0x68, 0x38, 0xa5, 0x06, 0xea, 0xcd, 0x36, 0x51, 0x1c, 0xd3, 0xa8, 0x8c, 0x44, 0xbd, 0x19, - 0xb3, 0x52, 0x2c, 0xa4, 0xe8, 0x6d, 0x28, 0x75, 0x89, 0xe3, 0x28, 0x1d, 0x52, 0x19, 0x65, 0x8a, - 0xd3, 0x42, 0xb1, 0xb4, 0xc3, 0x8b, 0xb1, 0x27, 0x97, 0x7f, 0x28, 0xc1, 0xa4, 0x3f, 0x72, 0xdb, - 0x9a, 0xe3, 0xa2, 0xdf, 0x48, 0xf8, 0x61, 0x2d, 0x5f, 0x97, 0xa8, 0x35, 0xf3, 0x42, 0xdf, 0xe7, - 0xbd, 0x92, 0x90, 0x0f, 0xee, 0xc0, 0xa8, 0xe6, 0x92, 0x2e, 0x9d, 0x87, 0xe2, 0xbd, 0x89, 0x95, - 0x7b, 0x79, 0x5d, 0xa6, 0x31, 0x29, 0x40, 0x47, 0xb7, 0xa8, 0x39, 0xe6, 0x28, 0xf2, 0x9f, 0x8c, - 0x84, 0x9a, 0x4f, 0x5d, 0x13, 0x7d, 0x02, 0x65, 0x87, 0xe8, 0x44, 0x75, 0x4d, 0x5b, 0x34, 0xff, - 0xbd, 0x9c, 0xcd, 0x57, 0x0e, 0x89, 0xde, 0x12, 0xa6, 0x8d, 0x9b, 0xb4, 0xfd, 0xde, 0x17, 0xf6, - 0x21, 0xd1, 0x47, 0x50, 0x76, 0x49, 0xd7, 0xd2, 0x15, 0x97, 0x88, 0x75, 0xf4, 0x66, 0xb8, 0x0b, - 0xd4, 0x73, 0x28, 0x58, 0xd3, 0x6c, 0xef, 0x0b, 0x35, 0xb6, 0x7c, 0xfc, 0x21, 0xf1, 0x4a, 0xb1, - 0x0f, 0x83, 0x4e, 0x61, 0xaa, 0x67, 0xb5, 0xa9, 0xa6, 0x4b, 0xa3, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, - 0x71, 0xde, 0xb1, 0x39, 0x88, 0x58, 0x37, 0x16, 0x44, 0x5d, 0x53, 0xd1, 0x72, 0x1c, 0xab, 0x05, - 0xad, 0xc2, 0x74, 0x57, 0x33, 0x68, 0x5c, 0xea, 0xb7, 0x88, 0x6a, 0x1a, 0x6d, 0x87, 0xb9, 0xd5, - 0x68, 0x63, 0x51, 0x00, 0x4c, 0xef, 0x44, 0xc5, 0x38, 0xae, 0x8f, 0xbe, 0x0e, 0xc8, 0xeb, 0xc6, - 0x53, 0x1e, 0xc4, 0x35, 0xd3, 0x60, 0x3e, 0x57, 0x0c, 0x9c, 0x7b, 0x3f, 0xa1, 0x81, 0x53, 0xac, - 0xd0, 0x36, 0xcc, 0xdb, 0xe4, 0x54, 0xa3, 0x7d, 0x7c, 0xa6, 0x39, 0xae, 0x69, 0xf7, 0xb7, 0xb5, - 0xae, 0xe6, 0x56, 0xc6, 0x58, 0x9b, 0x2a, 0x17, 0xe7, 0xd5, 0x79, 0x9c, 0x22, 0xc7, 0xa9, 0x56, - 0xf2, 0x9f, 0x8e, 0xc1, 0x74, 0x2c, 0xde, 0xa0, 0xe7, 0xb0, 0xa0, 0xf6, 0x6c, 0x9b, 0x18, 0xee, - 0x6e, 0xaf, 0x7b, 0x48, 0xec, 0x96, 0x7a, 0x4c, 0xda, 0x3d, 0x9d, 0xb4, 0x99, 0xa3, 0x8c, 0x36, - 0x96, 0x45, 0x8b, 0x17, 0xd6, 0x52, 0xb5, 0x70, 0x86, 0x35, 0x1d, 0x05, 0x83, 0x15, 0xed, 0x68, - 0x8e, 0xe3, 0x63, 0x16, 0x18, 0xa6, 0x3f, 0x0a, 0xbb, 0x09, 0x0d, 0x9c, 0x62, 0x45, 0xdb, 0xd8, - 0x26, 0x8e, 0x66, 0x93, 0x76, 0xbc, 0x8d, 0xc5, 0x68, 0x1b, 0xd7, 0x53, 0xb5, 0x70, 0x86, 0x35, - 0x7a, 0x04, 0x13, 0xbc, 0x36, 0x36, 0x7f, 0x62, 0xa2, 0xe7, 0x04, 0xd8, 0xc4, 0x6e, 0x20, 0xc2, - 0x61, 0x3d, 0xda, 0x35, 0xf3, 0xd0, 0x21, 0xf6, 0x29, 0x69, 0x67, 0x4f, 0xf0, 0x5e, 0x42, 0x03, - 0xa7, 0x58, 0xd1, 0xae, 0x71, 0x0f, 0x4c, 0x74, 0x6d, 0x2c, 
0xda, 0xb5, 0x83, 0x54, 0x2d, 0x9c, - 0x61, 0x4d, 0xfd, 0x98, 0x37, 0x79, 0xf5, 0x54, 0xd1, 0x74, 0xe5, 0x50, 0x27, 0x95, 0x52, 0xd4, - 0x8f, 0x77, 0xa3, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x61, 0x96, 0x17, 0x1d, 0x18, 0x8a, 0x0f, 0x52, - 0x66, 0x20, 0x6f, 0x08, 0x90, 0xd9, 0xdd, 0xb8, 0x02, 0x4e, 0xda, 0xa0, 0x0f, 0x60, 0x4a, 0x35, - 0x75, 0x9d, 0xf9, 0xe3, 0x9a, 0xd9, 0x33, 0xdc, 0xca, 0x38, 0x43, 0x41, 0x74, 0x3d, 0xae, 0x45, - 0x24, 0x38, 0xa6, 0x89, 0x08, 0x80, 0xea, 0x25, 0x1c, 0xa7, 0x02, 0x2c, 0x3e, 0x3e, 0xc8, 0x1b, - 0x03, 0xfc, 0x54, 0x15, 0x70, 0x00, 0xbf, 0xc8, 0xc1, 0x21, 0x60, 0xf9, 0x9f, 0x24, 0x58, 0xcc, - 0x08, 0x1d, 0xe8, 0x6b, 0x91, 0x14, 0xfb, 0x6b, 0xb1, 0x14, 0x7b, 0x3b, 0xc3, 0x2c, 0x94, 0x67, - 0x0d, 0x98, 0xb4, 0x69, 0xaf, 0x8c, 0x0e, 0x57, 0x11, 0x31, 0xf2, 0xd1, 0x80, 0x6e, 0xe0, 0xb0, - 0x4d, 0x10, 0xf3, 0x67, 0x2f, 0xce, 0xab, 0x93, 0x11, 0x19, 0x8e, 0xc2, 0xcb, 0x7f, 0x56, 0x00, - 0x58, 0x27, 0x96, 0x6e, 0xf6, 0xbb, 0xc4, 0xb8, 0x0e, 0x0e, 0xb5, 0x17, 0xe1, 0x50, 0xef, 0x0e, - 0x9a, 0x1e, 0xbf, 0x69, 0x99, 0x24, 0xea, 0x9b, 0x31, 0x12, 0x55, 0xcf, 0x0f, 0x79, 0x39, 0x8b, - 0xfa, 0xb7, 0x22, 0xcc, 0x05, 0xca, 0x01, 0x8d, 0x7a, 0x12, 0x99, 0xe3, 0x5f, 0x8d, 0xcd, 0xf1, - 0x62, 0x8a, 0xc9, 0x6b, 0xe3, 0x51, 0x9f, 0xc2, 0x14, 0x65, 0x39, 0x7c, 0x2e, 0x19, 0x87, 0x1a, - 0x1b, 0x9a, 0x43, 0xf9, 0xd9, 0x6e, 0x3b, 0x82, 0x84, 0x63, 0xc8, 0x19, 0x9c, 0xad, 0xf4, 0x8b, - 0xc8, 0xd9, 0x7e, 0x24, 0xc1, 0x54, 0x30, 0x4d, 0xd7, 0x40, 0xda, 0x76, 0xa3, 0xa4, 0xed, 0xed, - 0xdc, 0x2e, 0x9a, 0xc1, 0xda, 0xfe, 0x9b, 0x12, 0x7c, 0x5f, 0x89, 0x2e, 0xf0, 0x43, 0x45, 0x3d, - 0x19, 0xbc, 0xc7, 0x43, 0xdf, 0x97, 0x00, 0x89, 0x2c, 0xb0, 0x6a, 0x18, 0xa6, 0xab, 0xf0, 0x58, - 0xc9, 0x9b, 0xb5, 0x95, 0xbb, 0x59, 0x5e, 0x8d, 0xb5, 0x83, 0x04, 0xd6, 0x86, 0xe1, 0xda, 0xfd, - 0x60, 0x92, 0x93, 0x0a, 0x38, 0xa5, 0x01, 0x48, 0x01, 0xb0, 0x05, 0xe6, 0xbe, 0x29, 0x16, 0xf2, - 0xbb, 0x39, 0x62, 0x1e, 0x35, 0x58, 0x33, 0x8d, 0x23, 0xad, 0x13, 0x84, 0x1d, 0xec, 0x03, 0xe1, - 0x10, 0xe8, 0xd2, 0x06, 0x2c, 0x66, 0xb4, 0x16, 0xcd, 0x40, 0xf1, 0x84, 0xf4, 0xf9, 0xb0, 0x61, - 0xfa, 0x27, 0x9a, 0x87, 0xd1, 0x53, 0x45, 0xef, 0xf1, 0xf0, 0x3b, 0x8e, 0xf9, 0xc7, 0x07, 0x85, - 0xf7, 0x25, 0xf9, 0x87, 0xa3, 0x61, 0xdf, 0x61, 0x8c, 0xf9, 0x1e, 0xdd, 0xb4, 0x5a, 0xba, 0xa6, - 0x2a, 0x8e, 0x20, 0x42, 0x37, 0xf9, 0x86, 0x95, 0x97, 0x61, 0x5f, 0x1a, 0xe1, 0xd6, 0x85, 0xd7, - 0xcb, 0xad, 0x8b, 0xaf, 0x86, 0x5b, 0xff, 0x26, 0x94, 0x1d, 0x8f, 0x55, 0x8f, 0x30, 0xc8, 0x07, - 0x43, 0xc4, 0x57, 0x41, 0xa8, 0xfd, 0x0a, 0x7c, 0x2a, 0xed, 0x83, 0xa6, 0x91, 0xe8, 0xd1, 0x21, - 0x49, 0xf4, 0x2b, 0x25, 0xbe, 0x34, 0xde, 0x58, 0x4a, 0xcf, 0x21, 0x6d, 0x16, 0xdb, 0xca, 0x41, - 0xbc, 0x69, 0xb2, 0x52, 0x2c, 0xa4, 0xe8, 0x93, 0x88, 0xcb, 0x96, 0xaf, 0xe2, 0xb2, 0x53, 0xd9, - 0xee, 0x8a, 0x0e, 0x60, 0xd1, 0xb2, 0xcd, 0x8e, 0x4d, 0x1c, 0x67, 0x9d, 0x28, 0x6d, 0x5d, 0x33, - 0x88, 0x37, 0x3e, 0x9c, 0x11, 0xdd, 0xbe, 0x38, 0xaf, 0x2e, 0x36, 0xd3, 0x55, 0x70, 0x96, 0xad, - 0xfc, 0x62, 0x04, 0x66, 0xe2, 0x19, 0x30, 0x83, 0xa4, 0x4a, 0x57, 0x22, 0xa9, 0xf7, 0x43, 0x8b, - 0x81, 0x33, 0xf8, 0xd0, 0x09, 0x4e, 0x62, 0x41, 0xac, 0xc2, 0xb4, 0x88, 0x06, 0x9e, 0x50, 0xd0, - 0x74, 0x7f, 0xf6, 0x0f, 0xa2, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x60, 0xd2, 0x66, 0xbc, 0xdb, 0x03, - 0xe0, 0xdc, 0xf5, 0x96, 0x00, 0x98, 0xc4, 0x61, 0x21, 0x8e, 0xea, 0x52, 0xde, 0x1a, 0xd0, 0x51, - 0x0f, 0x60, 0x24, 0xca, 0x5b, 0x57, 0xe3, 0x0a, 0x38, 0x69, 0x83, 0x76, 0x60, 0xae, 0x67, 0x24, - 0xa1, 0xb8, 0x2b, 0xdf, 0x16, 0x50, 0x73, 0x07, 0x49, 0x15, 0x9c, 0x66, 0x87, 0x8e, 
0x22, 0x54, - 0x76, 0x8c, 0x85, 0xe7, 0x95, 0xdc, 0x0b, 0x2f, 0x37, 0x97, 0x4d, 0xa1, 0xdb, 0xe5, 0xbc, 0x74, - 0x5b, 0xfe, 0x7b, 0x29, 0x9c, 0x84, 0x7c, 0x0a, 0x3c, 0xe8, 0x94, 0x29, 0x61, 0x11, 0x62, 0x47, - 0x66, 0x3a, 0xfb, 0x7d, 0x3c, 0x14, 0xfb, 0x0d, 0x92, 0xe7, 0x60, 0xfa, 0xfb, 0x99, 0x04, 0x0b, - 0x9b, 0xad, 0xa7, 0xb6, 0xd9, 0xb3, 0xbc, 0xe6, 0xec, 0x59, 0x7c, 0x68, 0xbe, 0x0c, 0x23, 0x76, - 0x4f, 0xf7, 0xfa, 0xf1, 0xa6, 0xd7, 0x0f, 0xdc, 0xd3, 0x69, 0x3f, 0xe6, 0x62, 0x56, 0xbc, 0x13, - 0xd4, 0x00, 0xed, 0xc2, 0x98, 0xad, 0x18, 0x1d, 0xe2, 0xa5, 0xd5, 0xb7, 0x06, 0xb4, 0x7e, 0x6b, - 0x1d, 0x53, 0xf5, 0x10, 0xb1, 0x61, 0xd6, 0x58, 0xa0, 0xc8, 0xff, 0x20, 0xc1, 0xf4, 0xb3, 0xfd, - 0xfd, 0xe6, 0x96, 0xc1, 0x56, 0x34, 0x3b, 0x5b, 0xbd, 0x0b, 0x23, 0x96, 0xe2, 0x1e, 0xc7, 0x33, - 0x3d, 0x95, 0x61, 0x26, 0x41, 0x0f, 0xa1, 0x4c, 0xff, 0xa5, 0xed, 0x62, 0x4b, 0x6a, 0x9c, 0x05, - 0xc2, 0x72, 0x53, 0x94, 0xbd, 0x0c, 0xfd, 0x8d, 0x7d, 0x4d, 0xf4, 0x2d, 0x28, 0xd1, 0xf8, 0x43, - 0x8c, 0x76, 0x4e, 0x82, 0x2e, 0x1a, 0xd5, 0xe0, 0x46, 0x01, 0xe7, 0x12, 0x05, 0xd8, 0x83, 0x93, - 0x4f, 0x60, 0x3e, 0xd4, 0x09, 0x3a, 0x8a, 0xcf, 0x69, 0x4e, 0x45, 0x2d, 0x18, 0xa5, 0xb5, 0xd3, - 0xcc, 0x59, 0xcc, 0x71, 0x04, 0x1a, 0x1b, 0x88, 0x80, 0x1f, 0xd1, 0x2f, 0x07, 0x73, 0x2c, 0x79, - 0x07, 0x26, 0xd9, 0x31, 0xb4, 0x69, 0xbb, 0x6c, 0x30, 0xd1, 0x1d, 0x28, 0x76, 0x35, 0x43, 0x64, - 0xe7, 0x09, 0x61, 0x53, 0xa4, 0x99, 0x85, 0x96, 0x33, 0xb1, 0x72, 0x26, 0xe2, 0x55, 0x20, 0x56, - 0xce, 0x30, 0x2d, 0x97, 0x9f, 0x42, 0x49, 0x4c, 0x52, 0x18, 0xa8, 0x78, 0x39, 0x50, 0x31, 0x05, - 0x68, 0x0f, 0x4a, 0x5b, 0xcd, 0x86, 0x6e, 0x72, 0xae, 0xa6, 0x6a, 0x6d, 0x3b, 0x3e, 0x83, 0x6b, - 0x5b, 0xeb, 0x18, 0x33, 0x09, 0x92, 0x61, 0x8c, 0x9c, 0xa9, 0xc4, 0x72, 0x99, 0x1f, 0x8d, 0x37, - 0x80, 0xfa, 0xc6, 0x06, 0x2b, 0xc1, 0x42, 0x22, 0xff, 0x51, 0x01, 0x4a, 0x62, 0x38, 0xae, 0x61, - 0xef, 0xb6, 0x1d, 0xd9, 0xbb, 0xbd, 0x93, 0xcf, 0x35, 0x32, 0x37, 0x6e, 0xfb, 0xb1, 0x8d, 0xdb, - 0xfd, 0x9c, 0x78, 0x97, 0xef, 0xda, 0xbe, 0x57, 0x80, 0xa9, 0xa8, 0x53, 0xa2, 0x47, 0x30, 0x41, - 0xd3, 0x94, 0xa6, 0x92, 0xdd, 0x80, 0x1d, 0xfb, 0x47, 0x37, 0xad, 0x40, 0x84, 0xc3, 0x7a, 0xa8, - 0xe3, 0x9b, 0x51, 0x3f, 0x12, 0x9d, 0xce, 0x1e, 0xd2, 0x9e, 0xab, 0xe9, 0x35, 0x7e, 0x21, 0x53, - 0xdb, 0x32, 0xdc, 0x3d, 0xbb, 0xe5, 0xda, 0x9a, 0xd1, 0x49, 0x54, 0xc4, 0x9c, 0x32, 0x8c, 0x8c, - 0xbe, 0x49, 0x53, 0xa6, 0x63, 0xf6, 0x6c, 0x95, 0xa4, 0x51, 0x5f, 0x8f, 0xb6, 0xd1, 0x05, 0xda, - 0xde, 0x36, 0x55, 0x45, 0xe7, 0x93, 0x83, 0xc9, 0x11, 0xb1, 0x89, 0xa1, 0x12, 0x8f, 0x6e, 0x72, - 0x08, 0xec, 0x83, 0xc9, 0x7f, 0x23, 0xc1, 0x84, 0x18, 0x8b, 0x6b, 0xd8, 0xe4, 0x7c, 0x23, 0xba, - 0xc9, 0x79, 0x2b, 0x67, 0xe4, 0x48, 0xdf, 0xe1, 0xfc, 0xad, 0x04, 0x4b, 0x5e, 0xd3, 0x4d, 0xa5, - 0xdd, 0x50, 0x74, 0xc5, 0x50, 0x89, 0xed, 0xf9, 0xfa, 0x12, 0x14, 0x34, 0x4b, 0xcc, 0x24, 0x08, - 0x80, 0xc2, 0x56, 0x13, 0x17, 0x34, 0x8b, 0x32, 0x90, 0x63, 0xd3, 0x71, 0xd9, 0x4e, 0x88, 0x6f, - 0xb2, 0xfd, 0x56, 0x3f, 0x13, 0xe5, 0xd8, 0xd7, 0x40, 0x07, 0x30, 0x6a, 0x99, 0xb6, 0x4b, 0xb3, - 0x7e, 0x31, 0x36, 0xbf, 0x97, 0xb4, 0x9a, 0xce, 0x9b, 0x70, 0xc4, 0x20, 0x02, 0x51, 0x18, 0xcc, - 0xd1, 0xe4, 0xdf, 0x93, 0xe0, 0x8d, 0x94, 0xf6, 0x0b, 0xc2, 0xd5, 0x86, 0x92, 0xc6, 0x85, 0x22, - 0xec, 0x7d, 0x25, 0x5f, 0xb5, 0x29, 0x43, 0x11, 0x84, 0x5c, 0x2f, 0xb4, 0x7a, 0xd0, 0xf2, 0x0f, - 0x24, 0x98, 0x4d, 0xb4, 0x97, 0xa5, 0x0e, 0xea, 0xcf, 0x62, 0xa7, 0xe2, 0xa7, 0x0e, 0xea, 0x96, - 0x4c, 0x82, 0xbe, 0x01, 0x65, 0x76, 0x8f, 0xa8, 0x9a, 0xba, 0x18, 0xc0, 0xba, 0x37, 0x80, 0x4d, - 0x51, 0xfe, 
0xf2, 0xbc, 0x7a, 0x3b, 0xe5, 0x9c, 0xc2, 0x13, 0x63, 0x1f, 0x00, 0x55, 0x61, 0x94, - 0xd8, 0xb6, 0x69, 0x8b, 0x24, 0x34, 0x4e, 0x47, 0x6a, 0x83, 0x16, 0x60, 0x5e, 0x2e, 0xff, 0x45, - 0xe0, 0xa4, 0x34, 0x2b, 0xd0, 0xf6, 0xd1, 0xc9, 0x89, 0x07, 0x46, 0x3a, 0x75, 0x98, 0x49, 0x50, - 0x0f, 0x66, 0xb4, 0x58, 0x1a, 0x11, 0xab, 0xb3, 0x9e, 0x6f, 0x18, 0x7d, 0xb3, 0x46, 0x45, 0xc0, - 0xcf, 0xc4, 0x25, 0x38, 0x51, 0x85, 0x4c, 0x20, 0xa1, 0x85, 0x3e, 0x82, 0x91, 0x63, 0xd7, 0xb5, - 0x52, 0x2e, 0x4a, 0x06, 0x24, 0xaf, 0xa0, 0x09, 0x65, 0xd6, 0xbb, 0xfd, 0xfd, 0x26, 0x66, 0x50, - 0xf2, 0xdf, 0x15, 0xfc, 0xf1, 0x60, 0xbb, 0xcb, 0x0f, 0xfd, 0xde, 0xae, 0xe9, 0x8a, 0xe3, 0xb0, - 0x10, 0xc6, 0x4f, 0x42, 0xe6, 0x43, 0x0d, 0xf7, 0x65, 0x38, 0xa1, 0x8d, 0xf6, 0x83, 0xa4, 0x2e, - 0x5d, 0x25, 0xa9, 0x4f, 0xa4, 0x25, 0x74, 0xf4, 0x0c, 0x8a, 0xae, 0x9e, 0xf7, 0x44, 0x43, 0x20, - 0xee, 0x6f, 0xb7, 0x82, 0xac, 0xb8, 0xbf, 0xdd, 0xc2, 0x14, 0x02, 0xed, 0xc1, 0x28, 0x25, 0x4e, - 0x34, 0x0f, 0x14, 0xf3, 0xe7, 0x15, 0x3a, 0x82, 0xc1, 0xe2, 0xa3, 0x5f, 0x0e, 0xe6, 0x38, 0xf2, - 0xef, 0x4b, 0x30, 0x19, 0xc9, 0x16, 0xc8, 0x86, 0x9b, 0x7a, 0x68, 0xed, 0x88, 0x71, 0x78, 0x7f, - 0xf8, 0x55, 0x27, 0x16, 0xfd, 0xbc, 0xa8, 0xf7, 0x66, 0x58, 0x86, 0x23, 0x75, 0xc8, 0x0a, 0x40, - 0xd0, 0x6d, 0xba, 0x0e, 0xa8, 0xf3, 0xf2, 0x05, 0x2f, 0xd6, 0x01, 0xf5, 0x69, 0x07, 0xf3, 0x72, - 0xb4, 0x02, 0xe0, 0x10, 0xd5, 0x26, 0xee, 0x6e, 0x10, 0xb8, 0xfc, 0x74, 0xdc, 0xf2, 0x25, 0x38, - 0xa4, 0x25, 0x7f, 0x56, 0x80, 0xc9, 0x5d, 0xe2, 0x7e, 0xd7, 0xb4, 0x4f, 0x9a, 0xa6, 0xae, 0xa9, - 0xfd, 0x6b, 0x20, 0x01, 0x38, 0x42, 0x02, 0x06, 0xc5, 0xcb, 0x48, 0xeb, 0x32, 0xa9, 0xc0, 0xc7, - 0x31, 0x2a, 0xb0, 0x32, 0x14, 0xea, 0xe5, 0x84, 0xe0, 0x47, 0x12, 0x2c, 0x46, 0xf4, 0x37, 0x82, - 0x58, 0xe3, 0x07, 0x7f, 0x29, 0x57, 0xf0, 0x8f, 0xc0, 0xd0, 0x80, 0x99, 0x1e, 0xfc, 0xd1, 0x36, - 0x14, 0x5c, 0x53, 0xac, 0x8c, 0xe1, 0x30, 0x09, 0xb1, 0x83, 0x7c, 0xb6, 0x6f, 0xe2, 0x82, 0x6b, - 0xca, 0xff, 0x28, 0x41, 0x25, 0xa2, 0x15, 0x8e, 0x96, 0xaf, 0xa9, 0x07, 0x18, 0x46, 0x8e, 0x6c, - 0xb3, 0x7b, 0xe5, 0x3e, 0xf8, 0x93, 0xbc, 0x69, 0x9b, 0x5d, 0xcc, 0xb0, 0xe4, 0x1f, 0x4b, 0x30, - 0x1b, 0xd1, 0xbc, 0x06, 0x4e, 0xf2, 0x51, 0x94, 0x93, 0xdc, 0x1f, 0xa6, 0x23, 0x19, 0xcc, 0xe4, - 0xc7, 0x85, 0x58, 0x37, 0x68, 0x87, 0xd1, 0x11, 0x4c, 0x58, 0x66, 0xbb, 0xf5, 0x0a, 0x2e, 0xce, - 0xa7, 0x29, 0x57, 0x6c, 0x06, 0x58, 0x38, 0x0c, 0x8c, 0xce, 0x60, 0x96, 0xd2, 0x16, 0xc7, 0x52, - 0x54, 0xd2, 0x7a, 0x05, 0x47, 0x89, 0xb7, 0xd8, 0xcd, 0x5c, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0x3b, - 0x50, 0xd2, 0x2c, 0xb6, 0x77, 0x11, 0x8b, 0x74, 0x20, 0xc1, 0xe3, 0x3b, 0x1d, 0x9e, 0x3e, 0xc4, - 0x07, 0xf6, 0x30, 0xe4, 0x7f, 0x8d, 0x7b, 0x03, 0xa3, 0xc2, 0x4f, 0x43, 0xd4, 0x43, 0xdc, 0xa1, - 0x5d, 0x8d, 0x76, 0xec, 0x0a, 0x96, 0x73, 0x55, 0xd6, 0x5e, 0x8e, 0x71, 0xa2, 0x5f, 0x81, 0x12, - 0x31, 0xda, 0x6c, 0x23, 0xc0, 0x0f, 0xa8, 0x58, 0xaf, 0x36, 0x78, 0x11, 0xf6, 0x64, 0xf2, 0x1f, - 0x14, 0x63, 0xbd, 0x62, 0x29, 0xfc, 0xd3, 0x57, 0xe6, 0x1c, 0xfe, 0x66, 0x22, 0xd3, 0x41, 0x0e, - 0x03, 0x6a, 0xc9, 0x7d, 0xfe, 0xcb, 0xc3, 0xf8, 0x7c, 0x38, 0xb7, 0x66, 0x12, 0x4b, 0xf4, 0x6d, - 0x18, 0x23, 0xbc, 0x0a, 0x9e, 0xb1, 0x1f, 0x0f, 0x53, 0x45, 0x10, 0x7e, 0x83, 0x90, 0x2d, 0xca, - 0x04, 0x2a, 0xfa, 0x1a, 0x1d, 0x2f, 0xaa, 0x4b, 0xb7, 0x3c, 0x9c, 0x99, 0x8f, 0x37, 0xee, 0xf0, - 0x6e, 0xfb, 0xc5, 0x2f, 0xcf, 0xab, 0x10, 0x7c, 0xe2, 0xb0, 0x85, 0xfc, 0xdb, 0x30, 0x97, 0x92, - 0x22, 0x90, 0x1a, 0x39, 0x55, 0xe3, 0x11, 0xb3, 0x9e, 0x6f, 0x1a, 0xf2, 0x5f, 0x0f, 0xff, 0xb3, - 0x04, 0xb3, 0x6c, 0x76, 0xd4, 0x9e, 
0xad, 0xb9, 0xfd, 0x6b, 0xcb, 0xcb, 0xcf, 0x23, 0x79, 0xf9, - 0xe1, 0x80, 0x29, 0x49, 0xb4, 0x30, 0x2b, 0x37, 0xcb, 0x3f, 0x91, 0xe0, 0x56, 0x42, 0xfb, 0x1a, - 0x42, 0xf7, 0x41, 0x34, 0x74, 0x7f, 0x69, 0xd8, 0x0e, 0x65, 0x84, 0xef, 0xff, 0x9a, 0x4d, 0xe9, - 0x0e, 0x5b, 0xa5, 0x2b, 0x00, 0x96, 0xad, 0x9d, 0x6a, 0x3a, 0xe9, 0x88, 0x17, 0x2d, 0xe5, 0xd0, - 0x7b, 0x45, 0x5f, 0x82, 0x43, 0x5a, 0xc8, 0x81, 0x85, 0x36, 0x39, 0x52, 0x7a, 0xba, 0xbb, 0xda, - 0x6e, 0xaf, 0x29, 0x96, 0x72, 0xa8, 0xe9, 0x9a, 0xab, 0x89, 0xb3, 0xbf, 0xf1, 0xc6, 0x13, 0xfe, - 0xd2, 0x24, 0x4d, 0xe3, 0xe5, 0x79, 0xf5, 0x4e, 0xda, 0x55, 0xaf, 0xa7, 0xd2, 0xc7, 0x19, 0xd0, - 0xa8, 0x0f, 0x15, 0x9b, 0xfc, 0x56, 0x4f, 0xb3, 0x49, 0x7b, 0xdd, 0x36, 0xad, 0x48, 0xb5, 0x45, - 0x56, 0xed, 0xaf, 0x5f, 0x9c, 0x57, 0x2b, 0x38, 0x43, 0x67, 0x70, 0xc5, 0x99, 0xf0, 0xe8, 0x53, - 0x98, 0x53, 0xc4, 0xcb, 0xd2, 0x70, 0xad, 0x7c, 0x85, 0xbe, 0x7f, 0x71, 0x5e, 0x9d, 0x5b, 0x4d, - 0x8a, 0x07, 0x57, 0x98, 0x06, 0x8a, 0xea, 0x50, 0x3a, 0x65, 0x8f, 0x50, 0x9d, 0xca, 0x28, 0xc3, - 0xa7, 0xb9, 0xaa, 0xc4, 0xdf, 0xa5, 0x52, 0xcc, 0xb1, 0xcd, 0x16, 0x5b, 0xf9, 0x9e, 0x16, 0x7a, - 0x04, 0x13, 0x94, 0x4a, 0x8b, 0x95, 0xcf, 0xae, 0x7f, 0xca, 0x41, 0xc4, 0x7c, 0x16, 0x88, 0x70, - 0x58, 0x0f, 0x7d, 0x02, 0xe3, 0xc7, 0xe2, 0xb0, 0xd0, 0xa9, 0x94, 0x72, 0xf1, 0x84, 0xc8, 0xe1, - 0x62, 0x63, 0x56, 0x54, 0x31, 0xee, 0x15, 0x3b, 0x38, 0x40, 0x44, 0x6f, 0x43, 0x89, 0x7d, 0x6c, - 0xad, 0xb3, 0xb3, 0xf5, 0x72, 0x10, 0x57, 0x9f, 0xf1, 0x62, 0xec, 0xc9, 0x3d, 0xd5, 0xad, 0xe6, - 0x1a, 0xbb, 0xe3, 0x89, 0xa9, 0x6e, 0x35, 0xd7, 0xb0, 0x27, 0x47, 0xdf, 0x81, 0x92, 0x43, 0xb6, - 0x35, 0xa3, 0x77, 0x56, 0x81, 0x5c, 0x2f, 0x44, 0x5a, 0x1b, 0x4c, 0x3b, 0x76, 0xca, 0x1d, 0xd4, - 0x20, 0xe4, 0xd8, 0x83, 0x45, 0xc7, 0x30, 0x6e, 0xf7, 0x8c, 0x55, 0xe7, 0xc0, 0x21, 0x76, 0x65, - 0x82, 0xd5, 0x31, 0x28, 0x95, 0x60, 0x4f, 0x3f, 0x5e, 0x8b, 0x3f, 0x42, 0xbe, 0x06, 0x0e, 0xc0, - 0xd1, 0x31, 0x00, 0xfb, 0x60, 0x07, 0xea, 0x95, 0x85, 0x5c, 0x5b, 0x33, 0xec, 0x1b, 0xc4, 0xeb, - 0xe2, 0x97, 0x6a, 0xbe, 0x18, 0x87, 0xb0, 0xd1, 0x1f, 0x4a, 0x80, 0x9c, 0x9e, 0x65, 0xe9, 0xa4, - 0x4b, 0x0c, 0x57, 0xd1, 0x59, 0xa9, 0x53, 0xb9, 0xc9, 0xaa, 0xfc, 0x70, 0xd0, 0x08, 0x26, 0x0c, - 0xe3, 0x55, 0xfb, 0x77, 0x65, 0x49, 0x55, 0x9c, 0x52, 0x2f, 0x9d, 0xc4, 0x23, 0xd1, 0xeb, 0xc9, - 0x5c, 0x93, 0x98, 0x7e, 0x55, 0x11, 0x4c, 0xa2, 0x90, 0x63, 0x0f, 0x16, 0x3d, 0x87, 0x05, 0xef, - 0xb5, 0x34, 0x36, 0x4d, 0x77, 0x53, 0xd3, 0x89, 0xd3, 0x77, 0x5c, 0xd2, 0xad, 0x4c, 0x31, 0x07, - 0xf3, 0x9f, 0x8c, 0xe1, 0x54, 0x2d, 0x9c, 0x61, 0x8d, 0xba, 0x50, 0xf5, 0x82, 0x13, 0x5d, 0xb9, - 0x7e, 0x74, 0xdc, 0x70, 0x54, 0x45, 0xe7, 0xd7, 0x87, 0xd3, 0xac, 0x82, 0x37, 0x2f, 0xce, 0xab, - 0xd5, 0xf5, 0xcb, 0x55, 0xf1, 0x20, 0x2c, 0xf4, 0x2d, 0xa8, 0x28, 0x59, 0xf5, 0xcc, 0xb0, 0x7a, - 0xbe, 0x40, 0x23, 0x5e, 0x66, 0x05, 0x99, 0xd6, 0xc8, 0x85, 0x19, 0x25, 0xfa, 0x6e, 0xdd, 0xa9, - 0xcc, 0xe6, 0xba, 0x89, 0x88, 0x3d, 0x77, 0x0f, 0x8e, 0x92, 0x62, 0x02, 0x07, 0x27, 0x6a, 0x40, - 0xbf, 0x03, 0x48, 0x89, 0x3f, 0xb5, 0x77, 0x2a, 0x28, 0x57, 0xa2, 0x4b, 0xbc, 0xd1, 0x0f, 0xdc, - 0x2e, 0x21, 0x72, 0x70, 0x4a, 0x3d, 0x74, 0x0f, 0xa1, 0xc4, 0x7e, 0x1e, 0xe0, 0x54, 0x16, 0x13, - 0x6c, 0xe8, 0x92, 0xca, 0x7d, 0xbb, 0xd0, 0x2d, 0x69, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0xdb, 0x30, - 0x2f, 0x0a, 0x0f, 0x0c, 0x47, 0x39, 0x22, 0xad, 0xbe, 0xa3, 0xba, 0xba, 0x53, 0x99, 0x63, 0xf1, - 0x9d, 0xdd, 0xd4, 0xaf, 0xa6, 0xc8, 0x71, 0xaa, 0x15, 0xfa, 0x10, 0x66, 0x8e, 0x4c, 0xfb, 0x50, - 0x6b, 0xb7, 0x89, 0xe1, 0x21, 0xcd, 0x33, 0x24, 0x76, 0x32, 
0xb6, 0x19, 0x93, 0xe1, 0x84, 0x36, - 0x72, 0xe0, 0x96, 0x40, 0x6e, 0xda, 0xa6, 0xba, 0x63, 0xf6, 0x0c, 0x97, 0x53, 0xce, 0x5b, 0x7e, - 0x1a, 0xbd, 0xb5, 0x9a, 0xa6, 0xf0, 0xf2, 0xbc, 0x7a, 0x37, 0x7d, 0x23, 0x12, 0x28, 0xe1, 0x74, - 0x6c, 0x64, 0xc1, 0x4d, 0xf1, 0xa3, 0x0f, 0x76, 0x44, 0x57, 0xa9, 0xb0, 0xa5, 0xff, 0xc1, 0xe0, - 0x80, 0xe7, 0x9b, 0xc4, 0xd7, 0xff, 0xcc, 0xc5, 0x79, 0xf5, 0x66, 0x58, 0x01, 0x47, 0x6a, 0x60, - 0x8f, 0xfc, 0xc4, 0xd5, 0xf2, 0xf5, 0xfc, 0x50, 0x62, 0xb8, 0x47, 0x7e, 0x41, 0xd3, 0x5e, 0xd9, - 0x23, 0xbf, 0x10, 0xe4, 0xe5, 0xa7, 0x43, 0xff, 0x59, 0x80, 0xb9, 0x40, 0x39, 0xf7, 0x23, 0xbf, - 0x14, 0x93, 0x5f, 0xfe, 0x58, 0x22, 0xdf, 0xc3, 0xbb, 0x60, 0xe8, 0xfe, 0xef, 0x3d, 0xbc, 0x0b, - 0xda, 0x96, 0xb1, 0x7b, 0xf8, 0xab, 0x42, 0xb8, 0x03, 0x43, 0xbe, 0xfe, 0x7a, 0x05, 0xbf, 0x17, - 0xf8, 0x85, 0x7b, 0x40, 0x26, 0xff, 0xa4, 0x08, 0x33, 0xf1, 0xd5, 0x18, 0x79, 0x24, 0x24, 0x0d, - 0x7c, 0x24, 0xd4, 0x84, 0xf9, 0xa3, 0x9e, 0xae, 0xf7, 0x59, 0x1f, 0x42, 0x2f, 0x85, 0xf8, 0x75, - 0xfd, 0x17, 0x84, 0xe5, 0xfc, 0x66, 0x8a, 0x0e, 0x4e, 0xb5, 0x4c, 0xbe, 0x19, 0x1a, 0xf9, 0xdf, - 0xbe, 0x19, 0x1a, 0xbd, 0xc2, 0x9b, 0xa1, 0xf4, 0x67, 0x57, 0xc5, 0x2b, 0x3d, 0xbb, 0xba, 0xca, - 0x83, 0xa1, 0x94, 0x20, 0x36, 0xf0, 0x74, 0xe3, 0xab, 0x30, 0x15, 0x7d, 0xc4, 0xc6, 0xe7, 0x92, - 0xbf, 0xa3, 0x13, 0xcf, 0x22, 0x42, 0x73, 0xc9, 0xcb, 0xb1, 0xaf, 0x21, 0x5f, 0x48, 0xb0, 0x90, - 0xfe, 0x58, 0x1d, 0xe9, 0x30, 0xd5, 0x55, 0xce, 0xc2, 0x3f, 0x20, 0x90, 0xae, 0x78, 0x78, 0xc7, - 0x5e, 0x2f, 0xed, 0x44, 0xb0, 0x70, 0x0c, 0x1b, 0x7d, 0x0c, 0xe5, 0xae, 0x72, 0xd6, 0xea, 0xd9, - 0x1d, 0x72, 0xe5, 0x43, 0x42, 0xb6, 0x8c, 0x76, 0x04, 0x0a, 0xf6, 0xf1, 0xe4, 0x9f, 0x4b, 0xb0, - 0x98, 0xf1, 0x26, 0xe9, 0xff, 0x51, 0x2f, 0x7f, 0x20, 0xc1, 0x1b, 0x99, 0xdb, 0x30, 0xf4, 0x38, - 0xf2, 0x7c, 0x4a, 0x8e, 0x3d, 0x9f, 0x42, 0x49, 0xc3, 0xd7, 0xf4, 0x7a, 0xea, 0x33, 0x09, 0x2a, - 0x59, 0xfb, 0x52, 0xf4, 0x28, 0xd2, 0xc8, 0x2f, 0xc6, 0x1a, 0x39, 0x9b, 0xb0, 0x7b, 0x4d, 0x6d, - 0xfc, 0x17, 0x09, 0x6e, 0x5f, 0xc2, 0xef, 0xfc, 0xed, 0x0f, 0x69, 0x87, 0xb5, 0xd8, 0xa9, 0xbd, - 0xb8, 0x4e, 0x0c, 0xb6, 0x3f, 0x29, 0x3a, 0x38, 0xd3, 0x1a, 0x1d, 0xc0, 0xa2, 0xd8, 0x7b, 0xc5, - 0x65, 0x82, 0xba, 0xb0, 0x57, 0xa6, 0xeb, 0xe9, 0x2a, 0x38, 0xcb, 0x56, 0xfe, 0x4b, 0x09, 0x16, - 0xd2, 0x0f, 0x1c, 0xd0, 0x7b, 0x91, 0x21, 0xaf, 0xc6, 0x86, 0x7c, 0x3a, 0x66, 0x25, 0x06, 0xfc, - 0xdb, 0x30, 0x25, 0x8e, 0x25, 0x04, 0x8c, 0x70, 0x66, 0x39, 0x2d, 0x3b, 0x09, 0x08, 0x8f, 0x1c, - 0xb3, 0x65, 0x12, 0x2d, 0xc3, 0x31, 0x34, 0xf9, 0x7b, 0x05, 0x18, 0x6d, 0xa9, 0x8a, 0x4e, 0xae, - 0x81, 0x1b, 0x7f, 0x3d, 0xc2, 0x8d, 0x07, 0xfd, 0x7e, 0x93, 0xb5, 0x2a, 0x93, 0x16, 0xe3, 0x18, - 0x2d, 0x7e, 0x27, 0x17, 0xda, 0xe5, 0x8c, 0xf8, 0x2b, 0x30, 0xee, 0x57, 0x3a, 0x5c, 0xa2, 0x96, - 0xff, 0xbc, 0x00, 0x13, 0xa1, 0x2a, 0x86, 0x4c, 0xf3, 0x47, 0x11, 0x6e, 0x53, 0xcc, 0x71, 0x08, - 0x14, 0xaa, 0xab, 0xe6, 0xb1, 0x19, 0xfe, 0xfb, 0x83, 0xe0, 0xc5, 0x79, 0x92, 0xe4, 0x7c, 0x15, - 0xa6, 0x5c, 0xc5, 0xee, 0x10, 0xd7, 0xbf, 0x90, 0xe1, 0x4f, 0x53, 0xfc, 0x1f, 0xc2, 0xec, 0x47, - 0xa4, 0x38, 0xa6, 0xbd, 0xf4, 0x04, 0x26, 0x23, 0x95, 0x0d, 0xf5, 0xf3, 0x81, 0xbf, 0x96, 0xe0, - 0x8b, 0x03, 0x0f, 0x92, 0x50, 0x23, 0xb2, 0x48, 0x6a, 0xb1, 0x45, 0xb2, 0x9c, 0x0d, 0xf0, 0xfa, - 0x9e, 0xa1, 0x36, 0xd6, 0x5e, 0x7c, 0xbe, 0x7c, 0xe3, 0xa7, 0x9f, 0x2f, 0xdf, 0xf8, 0xd9, 0xe7, - 0xcb, 0x37, 0x7e, 0xf7, 0x62, 0x59, 0x7a, 0x71, 0xb1, 0x2c, 0xfd, 0xf4, 0x62, 0x59, 0xfa, 0xd9, - 0xc5, 0xb2, 0xf4, 0xef, 0x17, 0xcb, 0xd2, 0x1f, 0xff, 0x7c, 0xf9, 0xc6, 0xc7, 0x77, 
0x2e, 0xfd, - 0xff, 0x1e, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x08, 0x20, 0x7f, 0x0a, 0x28, 0x42, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + // 2890 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, + 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26, + 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, + 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57, + 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42, + 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c, + 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3, + 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae, + 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef, + 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73, + 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2, + 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b, + 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 
0xeb, 0x91, 0xfa, + 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83, + 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8, + 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2, + 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79, + 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05, + 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f, + 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50, + 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84, + 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6, + 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a, + 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, + 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, + 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07, + 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, + 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, + 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a, + 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83, + 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, + 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, + 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd, + 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, + 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, + 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59, + 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf, + 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50, + 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, + 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, + 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, + 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, + 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, + 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37, + 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, + 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, + 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71, + 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, + 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, + 0x79, 
0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, + 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, + 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2, + 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, + 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, + 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, + 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, + 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c, + 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, + 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, + 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, + 0xc8, 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5, + 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, + 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, + 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6, + 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae, + 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c, + 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf, + 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, + 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8, + 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61, + 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73, + 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9, + 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, + 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, + 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, + 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, + 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1, + 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33, + 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb, + 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa, + 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69, + 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1, + 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90, + 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1, + 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2, + 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 
0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54, + 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82, + 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f, + 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38, + 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69, + 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad, + 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc, + 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7, + 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, + 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, + 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d, + 0x55, 0xf6, 0x97, 0x39, 0xce, 0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc, + 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35, + 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d, + 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13, + 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb, + 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30, + 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96, + 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2, + 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde, + 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0, + 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0, + 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, + 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, + 0x99, 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, + 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83, + 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96, + 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, + 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, + 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, + 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, + 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b, + 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, + 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, + 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b, + 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, + 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 
0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, + 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, + 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, + 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, + 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, + 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab, + 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, + 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c, + 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, + 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, + 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, + 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, + 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, + 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, + 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8, + 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, + 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21, + 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31, + 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10, + 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b, + 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e, + 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39, + 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61, + 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60, + 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc, + 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9, + 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06, + 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4, + 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1, + 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e, + 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4, + 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e, + 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65, + 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e, + 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66, + 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55, + 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 
0xfc, 0x8f, 0x12, + 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1, + 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1, + 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e, + 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c, + 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f, + 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f, + 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e, + 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed, + 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71, + 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 0x15, 0x9e, 0xfe, + 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8, + 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81, + 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76, + 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78, + 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f, + 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31, + 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff, + 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49, + 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49, + 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2882,48 +2320,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3006,7 +2402,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, 
err error) { +func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3016,26 +2412,34 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IDRange) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3045,100 +2449,34 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IPBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 { @@ -4001,7 +3339,7 @@ func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4011,16 +3349,26 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -4044,7 +3392,60 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4054,12 +3455,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func 
(m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4091,7 +3492,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4101,49 +3502,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsGroup != nil { + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4151,63 +3535,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + dAtA[i] = 0x12 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i 
= encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4215,167 +3576,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x32 } - i-- - dAtA[i] = 0x78 } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - 
i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4385,12 +3727,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4428,7 +3770,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4438,50 +3780,23 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4491,687 +3806,610 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := 
range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + 
sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], 
nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ScaleSpec) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *IngressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedHostPath) Size() (n int) { +func (m *IngressRuleValue) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSet) Size() (n int) { +func (m *IngressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *IngressStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 
+ l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { if m == nil { return 0 } @@ -5188,70 +4426,92 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *NetworkPolicyStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *Deployment) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -5266,7 +4526,7 @@ func (m *Deployment) Size() (n int) { return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -5276,18 +4536,16 @@ func (m *DeploymentCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -5304,28 +4562,7 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil { return 0 } @@ -5340,151 +4577,75 @@ func (m *DeploymentSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } return n } -func (m *DeploymentStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } return n } -func (m *DeploymentStrategy) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *HTTPIngressRuleValue) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) return n } -func (m *IDRange) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } @@ -5499,4006 +4660,729 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *IngressList) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *IngressLoadBalancerIngress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *IngressLoadBalancerStatus) Size() (n int) { - if m == nil { - return 0 +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *IngressPortStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - if m.Error != nil { - l = len(*m.Error) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", 
this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - return n + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", 
this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 +func (this *Ingress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), 
"IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *IngressList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," } - return n + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - 
return n + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - n += 3 + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), 
"ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } 
- return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s } - -func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", 
"NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *NetworkPolicyStatus) String() string { + if this == nil { + return "nil" } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NetworkPolicyStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := 
strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Deployment{" + repeatedStringForItems := "[]ReplicaSet{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + 
fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerIngress) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]IngressPortStatus{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - s := strings.Join([]string{`&IngressLoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Ports:` + repeatedStringForPorts + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]IngressLoadBalancerIngress{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - s := strings.Join([]string{`&IngressLoadBalancerStatus{`, - `Ingress:` + repeatedStringForIngress + `,`, - `}`, - }, "") - return s -} -func (this *IngressPortStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressPortStatus{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Error:` + valueToStringGenerated(this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) 
+ "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, - `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) 
+ `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := 
m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error 
{ - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return 
io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - return nil + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9521,55 +5405,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - m.UpdatedReplicas = 0 + var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9579,52 +5425,28 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 6: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9651,16 +5473,15 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.ReadyReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9670,31 +5491,25 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.CollisionCount = &v + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9716,7 +5531,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9739,10 +5554,10 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9775,11 +5590,43 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9806,13 +5653,74 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9834,7 +5742,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9857,17 +5765,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9877,27 +5785,28 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9924,8 +5833,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9950,7 +5859,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9973,17 +5882,17 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9993,27 +5902,31 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10040,96 +5953,13 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10156,66 +5986,34 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := 
m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - m.Min = 0 + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10225,16 +6023,16 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - m.Max = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10244,11 +6042,12 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10270,7 +6069,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10293,17 +6092,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) } - m.Min = 0 + m.CurrentNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10313,16 +6112,16 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.Max = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10332,66 +6131,73 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - var stringLen uint64 + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowGenerated @@ -10401,29 +6207,74 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10433,23 +6284,25 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10472,7 +6325,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10495,17 +6348,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10515,28 +6368,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10563,40 +6415,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10621,7 +6443,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10644,17 +6466,17 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10664,27 +6486,28 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10711,13 +6534,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10744,10 +6567,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10772,7 +6592,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10795,17 +6615,17 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10815,30 +6635,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10848,79 +6667,27 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10948,11 +6715,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10980,11 +6747,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11001,74 +6768,23 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if msglen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11095,8 +6811,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11121,7 +6836,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11144,36 +6859,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11183,29 +6879,30 @@ func (m 
*IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11215,24 +6912,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11255,7 +6953,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11278,15 +6976,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11314,11 +7012,11 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11345,63 +7043,107 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11428,10 +7170,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = 
&HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11456,7 +7195,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11479,15 +7218,35 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11514,16 +7273,16 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11550,14 +7309,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11584,16 +7342,74 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds 
|= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11603,25 +7419,48 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11643,7 +7482,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11666,15 +7505,110 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11701,10 +7635,50 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11726,7 +7700,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11749,15 +7723,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group 
for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11785,13 +7759,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11801,23 +7775,27 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11840,7 +7818,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11863,17 +7841,17 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11883,28 +7861,27 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11931,15 +7908,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11949,24 +7926,24 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11989,7 +7966,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12012,49 +7989,15 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12081,8 +8024,8 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - 
if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12107,7 +8050,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12130,17 +8073,17 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12150,31 +8093,29 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12184,25 +8125,23 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -12225,7 +8164,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12248,15 +8187,15 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12283,13 +8222,13 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12316,8 +8255,40 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12342,7 +8313,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12365,17 +8336,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12385,31 +8356,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen 
< 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12436,16 +8403,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12472,10 +8436,10 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12500,7 +8464,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12523,17 +8487,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12543,28 +8507,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12591,33 +8555,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12639,7 +8581,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12662,17 +8604,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12682,30 +8624,29 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12715,29 +8656,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 
{ return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12764,16 +8703,66 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12783,23 +8772,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12822,7 +8813,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12845,17 +8836,36 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 
0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12865,25 +8875,56 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12906,7 +8947,7 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12929,17 +8970,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var 
msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12949,28 +8990,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12997,7 +9037,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13022,7 +9062,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13045,15 +9085,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13080,41 +9120,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13139,7 +9148,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13162,37 +9171,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13202,29 +9191,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13234,29 +9227,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13266,27 +9261,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13314,31 +9311,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.HostNetwork = bool(v != 0) - case 7: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13365,16 +9393,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13384,37 +9461,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.HostIPC = bool(v != 0) - case 10: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13424,28 +9493,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 11: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13472,13 +9590,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13505,13 +9623,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13538,75 +9656,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13633,14 +9739,14 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13667,80 +9773,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13750,27 +9842,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13797,16 +9891,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13833,14 +9975,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13867,10 +10008,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13895,7 +10034,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13918,15 +10057,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13953,13 +10092,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13986,13 +10128,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14019,7 +10164,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14044,7 +10192,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14067,15 +10215,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14103,13 +10251,120 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14119,27 +10374,28 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14166,15 +10422,16 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14184,27 +10441,29 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14232,7 +10491,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -14255,7 +10514,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14278,48 +10537,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14346,8 +10572,8 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14372,7 +10598,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14395,17 +10621,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group 
for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14415,15 +10641,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14450,16 +10689,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14486,29 +10722,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14530,7 +10747,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14553,17 +10770,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14573,35 +10790,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14611,35 +10822,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14649,16 +10854,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14668,81 +10887,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14752,11 +10919,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14778,7 +10958,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14801,15 +10981,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14836,16 +11016,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14872,10 +11049,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14900,7 +11075,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14923,15 +11098,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14958,16 +11153,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14994,13 +11189,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15022,7 +11233,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15045,17 +11256,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15065,27 +11333,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break 
} } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15112,8 +11386,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15138,7 +11412,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15161,49 +11435,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15213,26 +11455,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15254,7 +11481,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15277,17 +11504,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15297,29 +11524,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15329,24 +11560,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15369,7 +11603,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15392,17 +11626,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15412,27 +11646,31 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15459,10 +11697,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15933,122 +12171,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 0509bc3d6..3ab6a093b 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,37 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/extensions/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. 
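The regenerated Unmarshal methods in the hunks above all repeat the same gogo/protobuf decoding pattern: read a varint tag, split it into field number and wire type, decode wire-type-0 fields with an inlined varint loop, and decode wire-type-2 fields by reading a varint length and bounds-checking iNdEx+length against len(dAtA) before slicing (the stringLen/postIndex checks guarded by ErrIntOverflowGenerated and ErrInvalidLengthGenerated). A minimal standalone sketch of that varint step follows; it is illustrative only, not the generated code itself, and the name decodeVarint is an assumption for this example.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// decodeVarint reads a base-128 varint starting at idx: seven payload bits per
// byte, least-significant group first, with the high bit set while more bytes
// follow. This mirrors the shift/b&0x7F loop inlined for every field above.
func decodeVarint(dAtA []byte, idx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if idx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, idx, nil
}

func main() {
	// Tag byte 0x0A encodes field number 1 with wire type 2 (length-delimited).
	tag, next, _ := decodeVarint([]byte{0x0A}, 0)
	fmt.Println(tag>>3, tag&0x7, next) // 1 2 1

	// The value 300 encodes as the two bytes 0xAC 0x02.
	v, _, _ := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v) // 300
}

Length-delimited fields (strings and nested messages such as LastTransitionTime or Conditions) reuse the same loop to read the byte count, then verify postIndex := iNdEx + length does not overflow or run past the buffer before slicing dAtA[iNdEx:postIndex], which is exactly the pattern repeated throughout the regenerated code above.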
@@ -398,19 +367,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -453,27 +409,6 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs @@ -875,164 +810,6 @@ message NetworkPolicyStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. 
You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. 
Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -1227,57 +1004,6 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. 
Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -1305,7 +1031,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -1320,16 +1046,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0bc..d58908edc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index be1b95e62..c0ac6fa25 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -35,7 +35,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -1021,389 +1021,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. 
These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. 
- // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. 
-type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. 
- // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. 
- RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. 
- SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 302eb9538..39aaf4853 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. 
Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,26 +212,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. 
Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", @@ -476,58 +418,6 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { return map_NetworkPolicyStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -617,46 +507,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -680,7 +530,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -688,14 +538,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 671aa2d9d..b6e927299 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -1062,161 +961,6 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1413,95 +1157,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1568,24 +1223,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 963aaffba..5c9354228 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { return 1, 16 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
-func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index ac6f7179a..c95999fa5 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index fe4f8022a..fc08e128d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 4bedcce3e..b2eff7f96 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go index e2bd27e8c..728252c0c 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta3 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go index 8d51e77a0..dadf95e1d 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ImageReview = map[string]string{ diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 8196a14b9..ed194a89d 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,7 +67,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } @@ -76,13 +76,13 @@ message HTTPIngressRuleValue { // to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs // that should not be included within this rule. message IPBlock { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional repeated string except = 2; } @@ -97,12 +97,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,12 +110,12 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -134,7 +134,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -146,31 +146,31 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. 
// This may be set to "Cluster" (default) or "Namespace". // +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +179,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -201,21 +201,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -223,21 +223,21 @@ message IngressLoadBalancerIngress { // IngressLoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -256,7 +256,7 @@ message IngressPortStatus { // the related backend services. 
Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -269,14 +269,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -301,18 +301,18 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -325,24 +325,24 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. 
Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -350,14 +350,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -365,11 +365,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -381,11 +381,11 @@ message NetworkPolicy { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NetworkPolicyStatus status = 3; @@ -395,7 +395,7 @@ message NetworkPolicy { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). 
   // If this field is present and contains at least one item, then this rule allows
@@ -403,7 +403,7 @@ message NetworkPolicyEgressRule {
   // +optional
   repeated NetworkPolicyPort ports = 1;
 
-  // List of destinations for outgoing traffic of pods selected for this rule.
+  // to is a list of destinations for outgoing traffic of pods selected for this rule.
   // Items in this list are combined using a logical OR operation. If this field is
   // empty or missing, this rule matches all destinations (traffic not restricted by
   // destination). If this field is present and contains at least one item, this rule
@@ -415,15 +415,15 @@ message NetworkPolicyEgressRule {
 // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
 // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
 message NetworkPolicyIngressRule {
-  // List of ports which should be made accessible on the pods selected for this
-  // rule. Each item in this list is combined using a logical OR. If this field is
+  // ports is a list of ports which should be made accessible on the pods selected for
+  // this rule. Each item in this list is combined using a logical OR. If this field is
   // empty or missing, this rule matches all ports (traffic not restricted by port).
   // If this field is present and contains at least one item, then this rule allows
   // traffic only if the traffic matches at least one port in the list.
   // +optional
   repeated NetworkPolicyPort ports = 1;
 
-  // List of sources which should be able to access the pods selected for this rule.
+  // from is a list of sources which should be able to access the pods selected for this rule.
   // Items in this list are combined using a logical OR operation. If this field is
   // empty or missing, this rule matches all sources (traffic not restricted by
   // source). If this field is present and contains at least one item, this rule
@@ -439,32 +439,32 @@ message NetworkPolicyList {
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
-  // Items is a list of schema objects.
+  // items is a list of schema objects.
   repeated NetworkPolicy items = 2;
 }
 
 // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of
 // fields are allowed
 message NetworkPolicyPeer {
-  // This is a label selector which selects Pods. This field follows standard label
+  // podSelector is a label selector which selects pods. This field follows standard label
   // selector semantics; if present but empty, it selects all pods.
   //
-  // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
-  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
-  // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+  // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the pods matching podSelector in the Namespaces selected by NamespaceSelector.
+  // Otherwise it selects the pods matching podSelector in the policy's own namespace.
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
 
-  // Selects Namespaces using cluster-scoped labels. This field follows standard label
-  // selector semantics; if present but empty, it selects all namespaces.
+  // namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+  // standard label selector semantics; if present but empty, it selects all namespaces.
   //
-  // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
-  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
-  // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
+  // If podSelector is also set, then the NetworkPolicyPeer as a whole selects
+  // the pods matching podSelector in the namespaces selected by namespaceSelector.
+  // Otherwise it selects all pods in the namespaces selected by namespaceSelector.
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2;
 
-  // IPBlock defines policy on a particular IPBlock. If this field is set then
+  // ipBlock defines policy on a particular IPBlock. If this field is set then
   // neither of the other fields can be.
   // +optional
   optional IPBlock ipBlock = 3;
@@ -472,19 +472,19 @@ message NetworkPolicyPeer {
 
 // NetworkPolicyPort describes a port to allow traffic on
 message NetworkPolicyPort {
-  // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
-  // field defaults to TCP.
+  // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+  // If not specified, this field defaults to TCP.
   // +optional
   optional string protocol = 1;
 
-  // The port on the given protocol. This can either be a numerical or named
+  // port represents the port on the given protocol. This can either be a numerical or named
   // port on a pod. If this field is not provided, this matches all port names and
   // numbers.
   // If present, only traffic on the specified protocol AND port will be matched.
   // +optional
   optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
 
-  // If set, indicates that the range of ports from port to endPort, inclusive,
+  // endPort indicates that the range of ports from port to endPort if set, inclusive,
   // should be allowed by the policy. This field cannot be defined if the port field
   // is not defined or if the port field is defined as a named (string) port.
   // The endPort must be equal or greater than port.
@@ -494,16 +494,16 @@ message NetworkPolicyPort {
 
 // NetworkPolicySpec provides the specification of a NetworkPolicy
 message NetworkPolicySpec {
-  // Selects the pods to which this NetworkPolicy object applies. The array of
-  // ingress rules is applied to any pods selected by this field. Multiple network
-  // policies can select the same set of pods. In this case, the ingress rules for
-  // each are combined additively. This field is NOT optional and follows standard
-  // label selector semantics. An empty podSelector matches all pods in this
-  // namespace.
+  // podSelector selects the pods to which this NetworkPolicy object applies.
+  // The array of ingress rules is applied to any pods selected by this field.
+  // Multiple network policies can select the same set of pods. In this case,
+  // the ingress rules for each are combined additively.
+  // This field is NOT optional and follows standard label selector semantics.
+  // An empty podSelector matches all pods in this namespace.
   optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
 
-  // List of ingress rules to be applied to the selected pods. Traffic is allowed to
-  // a pod if there are no NetworkPolicies selecting the pod
+  // ingress is a list of ingress rules to be applied to the selected pods.
+  // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
   // (and cluster policy otherwise allows the traffic), OR if the traffic source is
   // the pod's local node, OR if the traffic matches at least one ingress rule
   // across all of the NetworkPolicy objects whose podSelector matches the pod. If
@@ -512,8 +512,8 @@ message NetworkPolicySpec {
   // +optional
   repeated NetworkPolicyIngressRule ingress = 2;
 
-  // List of egress rules to be applied to the selected pods. Outgoing traffic is
-  // allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+  // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
+  // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
   // otherwise allows the traffic), OR if the traffic matches at least one egress rule
   // across all of the NetworkPolicy objects whose podSelector matches the pod. If
   // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
@@ -522,23 +522,23 @@ message NetworkPolicySpec {
   // +optional
   repeated NetworkPolicyEgressRule egress = 3;
 
-  // List of rule types that the NetworkPolicy relates to.
+  // policyTypes is a list of rule types that the NetworkPolicy relates to.
   // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
-  // If this field is not specified, it will default based on the existence of Ingress or Egress rules;
-  // policies that contain an Egress section are assumed to affect Egress, and all policies
-  // (whether or not they contain an Ingress section) are assumed to affect Ingress.
+  // If this field is not specified, it will default based on the existence of ingress or egress rules;
+  // policies that contain an egress section are assumed to affect egress, and all policies
+  // (whether or not they contain an ingress section) are assumed to affect ingress.
   // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
   // Likewise, if you want to write a policy that specifies that no egress is allowed,
   // you must specify a policyTypes value that include "Egress" (since such a policy would not include
-  // an Egress section and would otherwise default to just [ "Ingress" ]).
+  // an egress section and would otherwise default to just [ "Ingress" ]).
   // This field is beta-level in 1.8
   // +optional
   repeated string policyTypes = 4;
 }
 
-// NetworkPolicyStatus describe the current state of the NetworkPolicy.
+// NetworkPolicyStatus describes the current state of the NetworkPolicy.
 message NetworkPolicyStatus {
-  // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
+  // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
   // Current service state
   // +optional
   // +patchMergeKey=type
@@ -550,12 +550,12 @@ message NetworkPolicyStatus {
 
 // ServiceBackendPort is the service port being referenced.
 message ServiceBackendPort {
-  // Name is the name of the port on the Service.
+  // name is the name of the port on the Service.
   // This is a mutually exclusive setting with "Number".
   // +optional
   optional string name = 1;
 
-  // Number is the numerical port number (e.g. 80) on the Service.
+  // number is the numerical port number (e.g. 80) on the Service.
   // This is a mutually exclusive setting with "Name".
   // +optional
   optional int32 number = 2;
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index a9deb900a..fa7cf1bd7 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -28,16 +28,17 @@ import (
 // NetworkPolicy describes what network traffic is allowed for a set of Pods
 type NetworkPolicy struct {
 	metav1.TypeMeta `json:",inline"`
+
 	// Standard object's metadata.
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	// +optional
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
-	// Specification of the desired behavior for this NetworkPolicy.
+	// spec represents the specification of the desired behavior for this NetworkPolicy.
 	// +optional
 	Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
 
-	// Status is the current state of the NetworkPolicy.
+	// status represents the current state of the NetworkPolicy.
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
 	// +optional
 	Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
@@ -57,16 +58,16 @@ const (
 
 // NetworkPolicySpec provides the specification of a NetworkPolicy
 type NetworkPolicySpec struct {
-	// Selects the pods to which this NetworkPolicy object applies. The array of
-	// ingress rules is applied to any pods selected by this field. Multiple network
-	// policies can select the same set of pods. In this case, the ingress rules for
-	// each are combined additively. This field is NOT optional and follows standard
-	// label selector semantics. An empty podSelector matches all pods in this
-	// namespace.
+	// podSelector selects the pods to which this NetworkPolicy object applies.
+	// The array of ingress rules is applied to any pods selected by this field.
+	// Multiple network policies can select the same set of pods. In this case,
+	// the ingress rules for each are combined additively.
+	// This field is NOT optional and follows standard label selector semantics.
+	// An empty podSelector matches all pods in this namespace.
 	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
 
-	// List of ingress rules to be applied to the selected pods. Traffic is allowed to
-	// a pod if there are no NetworkPolicies selecting the pod
+	// ingress is a list of ingress rules to be applied to the selected pods.
+	// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
 	// (and cluster policy otherwise allows the traffic), OR if the traffic source is
 	// the pod's local node, OR if the traffic matches at least one ingress rule
 	// across all of the NetworkPolicy objects whose podSelector matches the pod.
If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -85,15 +86,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` @@ -102,15 +103,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -123,7 +124,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). 
// If this field is present and contains at least one item, then this rule allows @@ -131,7 +132,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -142,19 +143,19 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -166,12 +167,13 @@ type NetworkPolicyPort struct { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block + + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -179,25 +181,25 @@ type IPBlock struct { // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. 
- // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` @@ -233,9 +235,9 @@ const ( NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" ) -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -250,12 +252,13 @@ type NetworkPolicyStatus struct { // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -268,17 +271,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -289,18 +293,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -313,72 +318,73 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. 
The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // IngressLoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -386,14 +392,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -412,7 +418,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. 
type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -425,14 +431,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -460,7 +466,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -499,14 +505,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -522,19 +528,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. 
+ // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -544,23 +550,23 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -577,12 +583,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -590,15 +597,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. 
This is optional if the controller does // not require extra parameters. // +optional @@ -617,20 +624,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -642,10 +653,11 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 94ccf964b..91161d5ca 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. 
A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -49,8 +49,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_IPBlock = map[string]string{ "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. 
For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,7 +124,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -133,9 +133,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -144,7 +144,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "IngressLoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -153,9 +153,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. 
The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -164,7 +164,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -181,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -191,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. 
If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -203,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -211,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -223,8 +223,8 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", + "status": "status represents the current state of the NetworkPolicy. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -233,8 +233,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -243,8 +243,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -254,7 +254,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -263,9 +263,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -274,9 +274,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -285,10 +285,10 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -296,8 +296,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { } var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. 
Current service state", + "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", } func (NetworkPolicyStatus) SwaggerDoc() map[string]string { @@ -306,8 +306,8 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index 48d401db8..f54d1f824 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -31,6 +31,8 @@ import ( math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -128,10 +130,126 @@ func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{3} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{4} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{5} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{6} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + func init() { proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") } func init() { @@ -139,39 +257,51 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 506 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40, - 0x18, 0xc6, 0xe9, 0x2e, 0x24, 0x6b, 0xc1, 0xb0, 0xe9, 0x45, 0xc2, 0x61, 0x20, 0x9c, 0x48, 0x8c, - 0x33, 0xb2, 0x21, 0xc4, 0xab, 0xdd, 0x4d, 0x94, 0xc4, 0x3f, 0xd8, 0x4d, 0x3c, 0x18, 0x0f, 0x0e, - 0xe5, 0xb5, 0x8c, 0xd0, 0xce, 0x64, 0x66, 0xa8, 0xf1, 0xe6, 0x47, 0xf0, 0x2b, 0xe9, 0x89, 0xe3, - 0x1e, 0xf7, 0x44, 0xa4, 0x7e, 0x01, 0x3f, 0x82, 0x99, 0xa1, 0xbb, 0x94, 0x45, 0x57, 0xbd, 0x75, - 0xde, 0xf9, 0x3d, 0xcf, 0xfb, 0x3e, 0x7d, 0x5b, 0xf7, 0xc9, 0xec, 0x91, 0xc2, 0x8c, 0x93, 0xd9, - 0x62, 0x0c, 0x32, 0x01, 0x0d, 0x8a, 0xa4, 0x90, 0x4c, 0xb8, 0x24, 0xf9, 0x05, 0x15, 0x8c, 0x24, - 0xa0, 0x3f, 0x72, 0x39, 0x63, 0x49, 0x44, 0xd2, 0x1e, 0x9d, 0x8b, 0x29, 0xed, 0x91, 0x08, 0x12, - 0x90, 0x54, 0xc3, 0x04, 0x0b, 0xc9, 0x35, 0xf7, 0xd0, 0x86, 0xc7, 0x54, 0x30, 0xbc, 0xe5, 0xf1, - 0x15, 0xdf, 0x7c, 0x10, 0x31, 0x3d, 0x5d, 0x8c, 0x71, 0xc8, 0x63, 0x12, 0xf1, 0x88, 0x13, 0x2b, - 0x1b, 0x2f, 0xde, 0xdb, 0x93, 0x3d, 0xd8, 0xa7, 0x8d, 0x5d, 0xb3, 0x53, 0x68, 0x1f, 0x72, 0x09, - 0x24, 0xdd, 0x6b, 0xd9, 0xec, 0x6f, 0x99, 0x98, 0x86, 0x53, 0x96, 0x80, 0xfc, 0x44, 0xc4, 0x2c, - 0x32, 0x05, 0x45, 0x62, 0xd0, 0xf4, 0x77, 0x2a, 0xf2, 0x27, 0x95, 0x5c, 0x24, 0x9a, 0xc5, 0xb0, - 0x27, 0x18, 0xfc, 0x4d, 0xa0, 0xc2, 0x29, 0xc4, 0xf4, 0xa6, 0xae, 0xf3, 
0xcd, 0x71, 0xab, 0xa7, - 0xf3, 0x85, 0xd2, 0x20, 0x4f, 0x87, 0x67, 0x81, 0xf7, 0xce, 0x3d, 0x32, 0x33, 0x4d, 0xa8, 0xa6, - 0x0d, 0xa7, 0xed, 0x74, 0xab, 0x27, 0x0f, 0xf1, 0xf6, 0xa5, 0x5d, 0x5b, 0x63, 0x31, 0x8b, 0x4c, - 0x41, 0x61, 0x43, 0xe3, 0xb4, 0x87, 0x5f, 0x8e, 0x3f, 0x40, 0xa8, 0x9f, 0x83, 0xa6, 0xbe, 0xb7, - 0x5c, 0xb5, 0x4a, 0xd9, 0xaa, 0xe5, 0x6e, 0x6b, 0xc1, 0xb5, 0xab, 0xf7, 0xca, 0x2d, 0x2b, 0x01, - 0x61, 0xe3, 0xc0, 0xba, 0x13, 0x7c, 0xfb, 0x4a, 0x70, 0x61, 0xb8, 0x73, 0x01, 0xa1, 0x5f, 0xcb, - 0xcd, 0xcb, 0xe6, 0x14, 0x58, 0xab, 0xce, 0x57, 0xc7, 0xad, 0x17, 0xb8, 0x67, 0x4c, 0x69, 0xef, - 0xed, 0x5e, 0x10, 0xfc, 0x6f, 0x41, 0x8c, 0xda, 0xc6, 0x38, 0xce, 0x3b, 0x1d, 0x5d, 0x55, 0x0a, - 0x21, 0x46, 0x6e, 0x85, 0x69, 0x88, 0x55, 0xe3, 0xa0, 0x7d, 0xd8, 0xad, 0x9e, 0xdc, 0xff, 0x8f, - 0x14, 0xfe, 0xdd, 0xdc, 0xb7, 0x32, 0x34, 0x0e, 0xc1, 0xc6, 0xa8, 0xf3, 0x73, 0x37, 0x83, 0x49, - 0xe7, 0xbd, 0x76, 0x6b, 0x09, 0x9f, 0xc0, 0x39, 0xcc, 0x21, 0xd4, 0x5c, 0xe6, 0x39, 0xda, 0xc5, - 0x66, 0xe6, 0xb3, 0x33, 0x53, 0xbf, 0x28, 0x70, 0xfe, 0x71, 0xb6, 0x6a, 0xd5, 0x8a, 0x95, 0x60, - 0xc7, 0xc7, 0x7b, 0xec, 0xd6, 0x05, 0x48, 0x03, 0x3c, 0xe5, 0x4a, 0xfb, 0x4c, 0x2b, 0xbb, 0x8d, - 0x8a, 0x7f, 0x2f, 0x1f, 0xad, 0x3e, 0xda, 0xbd, 0x0e, 0x6e, 0xf2, 0x5e, 0xdb, 0x2d, 0x33, 0x91, - 0xf6, 0x1b, 0x87, 0x6d, 0xa7, 0x7b, 0x67, 0xbb, 0x94, 0xe1, 0x28, 0xed, 0x07, 0xf6, 0x26, 0x27, - 0x06, 0x8d, 0xf2, 0x1e, 0x31, 0xb0, 0xc4, 0xc0, 0x3f, 0x5b, 0xae, 0x51, 0xe9, 0x62, 0x8d, 0x4a, - 0x97, 0x6b, 0x54, 0xfa, 0x9c, 0x21, 0x67, 0x99, 0x21, 0xe7, 0x22, 0x43, 0xce, 0x65, 0x86, 0x9c, - 0xef, 0x19, 0x72, 0xbe, 0xfc, 0x40, 0xa5, 0x37, 0xe8, 0xf6, 0x7f, 0xfc, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xdf, 0x1d, 0xe9, 0x86, 0x1d, 0x04, 0x00, 0x00, + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, + 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, + 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, + 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, + 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, + 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, + 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, + 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, + 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, + 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, + 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, + 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, + 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, + 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, + 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, + 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, + 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, + 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 
0x56, 0x48, + 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, + 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, + 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, + 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, + 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, + 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, + 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, + 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, + 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, + 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, + 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, + 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, + 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, + 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, + 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, + 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, + 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, + 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, + 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, + 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, + 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, + 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, + 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, + 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, + 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, } func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { @@ -312,6 +442,179 @@ func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -350,82 +653,597 @@ func (m *ClusterCIDRList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *ClusterCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) - n += 1 + l + sovGenerated(uint64(l)) - return n -} + return n +} + +func (m *ClusterCIDRSpec) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IPv6) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + } + m.PerNodeHostBits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PerNodeHostBits |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv4 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv6 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ClusterCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + 
"," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -448,10 +1266,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -541,7 +1359,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -564,10 +1382,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -632,7 +1450,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) + m.Items = append(m.Items, IPAddress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -658,7 +1476,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -681,15 +1499,15 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { 
case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -716,18 +1534,100 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.PerNodeHostBits = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -737,14 +1637,27 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -772,11 +1685,11 @@ func (m 
*ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv4 = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -804,7 +1717,39 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv6 = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index bbda585b8..0f1f30d70 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -44,7 +44,7 @@ message ClusterCIDR { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ClusterCIDRSpec spec = 2; @@ -57,19 +57,19 @@ message ClusterCIDRList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. repeated ClusterCIDR items = 2; } // ClusterCIDRSpec defines the desired state of ClusterCIDR. message ClusterCIDRSpec { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -79,16 +79,77 @@ message ClusterCIDRSpec { // +required optional int32 perNodeHostBits = 2; - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. 
// +optional optional string ipv4 = 3; - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional optional string ipv6 = 4; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; + + // UID is the uid of the object being referenced. + // +optional + optional string uid = 5; +} + diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index 12c0cf7bd..8dda6394d 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -22,12 +22,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package. +// GroupName is the group name used in this package. const GroupName = "networking.k8s.io" -// SchemeGroupVersion is group version used to register these objects. +// SchemeGroupVersion is group version used to register objects in this package. var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +// Kind takes an unqualified kind and returns a Group qualified GroupKind. 
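// A minimal illustrative sketch, separate from the vendored files above: the
// generated.proto hunk just shown introduces the networking.k8s.io/v1alpha1
// IPAddress, IPAddressSpec and ParentReference messages that this dependency bump
// pulls in. The snippet below only uses field names defined in those hunks; the
// parent Service name and namespace are made-up values for illustration, not
// anything stated in this diff.
package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Per the IPAddress message comment, the object name must be the IP in
	// canonical form ("192.168.1.5" for IPv4, RFC 5952 form for IPv6).
	ip := networkingv1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"},
		Spec: networkingv1alpha1.IPAddressSpec{
			// spec.parentRef is required and references the object the IP is
			// attached to; a core Service is assumed here for illustration.
			ParentRef: &networkingv1alpha1.ParentReference{
				Group:     "", // core API group
				Resource:  "services",
				Namespace: "default",
				Name:      "kubernetes",
			},
		},
	}
	fmt.Println(ip.Name, ip.Spec.ParentRef.Resource)
}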
+func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + // Resource takes an unqualified resource and returns a Group qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() @@ -49,8 +54,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterCIDR{}, &ClusterCIDRList{}, + &IPAddress{}, + &IPAddressList{}, ) - // Add the watch version that applies. metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 734e9bf8a..52e4a11e8 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // +genclient @@ -37,12 +38,13 @@ import ( // selector matches the Node may be used. type ClusterCIDR struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -50,13 +52,13 @@ type ClusterCIDR struct { // ClusterCIDRSpec defines the desired state of ClusterCIDR. type ClusterCIDRSpec struct { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -66,14 +68,14 @@ type ClusterCIDRSpec struct { // +required PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. 
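// A short sketch of the register.go helpers above: Kind and Resource qualify
// unqualified names with the networking.k8s.io v1alpha1 group, which the new
// IPAddress and IPAddressList kinds are registered under. The plural resource
// name "ipaddresses" follows the usual convention and is assumed here.
package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
)

func main() {
	gk := networkingv1alpha1.Kind("IPAddress")       // GroupKind: networking.k8s.io, IPAddress
	gr := networkingv1alpha1.Resource("ipaddresses") // GroupResource: networking.k8s.io, ipaddresses
	gvk := networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress")
	fmt.Println(gk.String(), gr.String(), gvk.String())
}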
// +optional IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` @@ -85,11 +87,77 @@ type ClusterCIDRSpec struct { // ClusterCIDRList contains a list of ClusterCIDR. type ClusterCIDRList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` + // UID is the uid of the object being referenced. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index e0d4a4786..85304784f 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ClusterCIDR = map[string]string{ "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ClusterCIDR) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterCIDR) SwaggerDoc() map[string]string { var map_ClusterCIDRList = map[string]string{ "": "ClusterCIDRList contains a list of ClusterCIDR.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of ClusterCIDRs.", + "items": "items is the list of ClusterCIDRs.", } func (ClusterCIDRList) SwaggerDoc() map[string]string { @@ -49,14 +49,56 @@ func (ClusterCIDRList) SwaggerDoc() map[string]string { var map_ClusterCIDRSpec = map[string]string{ "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). 
This field is immutable.", - "ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", - "ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", } func (ClusterCIDRSpec) SwaggerDoc() map[string]string { return map_ClusterCIDRSpec } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. 
An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", + "uid": "UID is the uid of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go new file mode 100644 index 000000000..5f9c23f70 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index e549f3166..97db2eacc 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -106,3 +106,100 @@ func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
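// An illustrative IPAddress for the new v1alpha1 API: the object name is the IP
// in canonical form, spec.parentRef points at the owning resource, and the two
// well-known labels from well_known_labels.go record the IP family and the
// managing controller. The Service reference and label values are assumptions.
package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ip := networkingv1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{
			Name: "192.168.1.5", // canonical IPv4 form, leading zeros suppressed
			Labels: map[string]string{
				networkingv1alpha1.LabelIPAddressFamily: "IPv4",
				networkingv1alpha1.LabelManagedBy:       "example-ipam-controller", // hypothetical
			},
		},
		Spec: networkingv1alpha1.IPAddressSpec{
			// An IPAddress must reference a parent object.
			ParentRef: &networkingv1alpha1.ParentReference{
				Group:     "", // core API group
				Resource:  "services",
				Namespace: "default",
				Name:      "example-service", // hypothetical Service
			},
		},
	}
	fmt.Printf("%s is attached to %s/%s\n",
		ip.Name, ip.Spec.ParentRef.Namespace, ip.Spec.ParentRef.Name)
}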
+func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index dd6e3b26c..60438ba59 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -56,3 +56,39 @@ func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { return 1, 31 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 78ecf9fae..46bb7f66f 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,7 +68,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. 
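// A tiny sketch querying the generated prerelease-lifecycle methods above:
// per the returned values, IPAddress is introduced in 1.27, deprecated in 1.30,
// and no longer served from 1.33.
package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
)

func main() {
	var ip networkingv1alpha1.IPAddress
	iMaj, iMin := ip.APILifecycleIntroduced()
	dMaj, dMin := ip.APILifecycleDeprecated()
	rMaj, rMin := ip.APILifecycleRemoved()
	fmt.Printf("introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		iMaj, iMin, dMaj, dMin, rMaj, rMin)
}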
repeated HTTPIngressPath paths = 1; } @@ -82,12 +82,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,15 +95,15 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -121,7 +121,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -133,30 +133,30 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +165,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. 
message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -187,21 +187,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -209,21 +209,21 @@ message IngressLoadBalancerIngress { // LoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -242,7 +242,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -255,7 +255,7 @@ message IngressRule { // IngressRuleValue. 
If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -287,7 +287,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -300,44 +300,44 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 49c82123d..87cc91654 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,22 +84,22 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. 
// +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` @@ -106,13 +108,14 @@ type IngressSpec struct { // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,31 +125,31 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // LoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -154,14 +157,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. 
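// An illustrative v1beta1 Ingress assembled from the spec fields documented
// above: an ingressClassName, one TLS entry multiplexed by SNI, and one host
// rule with a Prefix path routed to a service backend. Hostnames, the secret,
// and service names are assumptions for the example.
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	className := "example-class"                 // hypothetical IngressClass name
	pathType := networkingv1beta1.PathTypePrefix // Exact, Prefix, or ImplementationSpecific

	ing := networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: networkingv1beta1.IngressSpec{
			IngressClassName: &className,
			// TLS terminates on port 443 for these hosts using the named secret.
			TLS: []networkingv1beta1.IngressTLS{{
				Hosts:      []string{"foo.bar.com"},
				SecretName: "foo-bar-tls", // hypothetical Secret
			}},
			Rules: []networkingv1beta1.IngressRule{{
				Host: "foo.bar.com",
				IngressRuleValue: networkingv1beta1.IngressRuleValue{
					HTTP: &networkingv1beta1.HTTPIngressRuleValue{
						Paths: []networkingv1beta1.HTTPIngressPath{{
							Path:     "/app",
							PathType: &pathType,
							Backend: networkingv1beta1.IngressBackend{
								ServiceName: "app-svc", // hypothetical Service
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println("ingress", ing.Name, "routes host", ing.Spec.Rules[0].Host)
}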
// The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -180,7 +183,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -193,7 +196,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -204,6 +207,7 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the @@ -234,7 +238,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -273,14 +277,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -297,22 +301,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. 
PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -333,12 +337,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -346,15 +351,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -373,19 +378,23 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. 
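// A sketch of an IngressClass using the fields documented above: a
// domain-prefixed controller name and an optional, namespace-scoped
// parameters reference. The controller, parameters kind, and names are
// assumptions for the example.
package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiGroup := "example.acme.io" // hypothetical parameters API group
	scope := "Namespace"          // "Cluster" (default) or "Namespace"
	ns := "ingress-config"        // required because scope is "Namespace"

	class := networkingv1beta1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{Name: "acme-lb"},
		Spec: networkingv1beta1.IngressClassSpec{
			// Domain-prefixed path, no more than 250 characters, immutable.
			Controller: "acme.io/ingress-controller",
			Parameters: &networkingv1beta1.IngressClassParametersReference{
				APIGroup:  &apiGroup,
				Kind:      "IngressParameters", // hypothetical CRD kind
				Name:      "acme-defaults",
				Scope:     &scope,
				Namespace: &ns,
			},
		},
	}
	fmt.Println(class.Name, "is handled by", class.Spec.Controller)
}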
// +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -404,6 +413,6 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 195d535c5..b2373669f 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. 
Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -50,8 +50,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +82,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +91,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +104,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,7 +115,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -124,9 +124,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "LoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -144,9 +144,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -155,7 +155,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. 
The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -172,10 +172,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. 
This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -183,8 +183,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -193,8 +193,8 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. 
Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 294be85b6..0152d5e3a 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,13 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index 984696d98..b00f58772 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -34,11 +34,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. 
It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,13 +51,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -66,7 +67,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -96,11 +97,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index a9eddc60e..f5e6b3277 100644 --- a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index d46e0ec6a..4673e9261 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. 
// +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -61,7 +61,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,17 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 588c8e4c0..bf9e284bf 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. 
type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,17 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -76,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -106,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 96413754f..ccc1b7085 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
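Editor's note (illustration, not part of the vendored diff): the node API hunks above only lowercase field names inside proto comments and swagger docs (handler, overhead, podFixed, scheduling); the exported Go identifiers (Handler, Overhead.PodFixed, Scheduling) are unchanged, so callers of these packages do not need code changes. Below is a minimal sketch of constructing a RuntimeClass with a fixed per-pod overhead against the vendored k8s.io/api/node/v1 types (the GA group, chosen for brevity even though the hunks here also touch v1alpha1 and v1beta1). The class name "gvisor", the handler "runsc", and the resource quantities are assumed example values, not something this repository defines.

// Sketch only: illustrates the RuntimeClass/Overhead shape whose doc comments are
// being regenerated above. Values are hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleRuntimeClass() *nodev1.RuntimeClass {
	return &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"},
		// handler: the CRI handler that runs pods of this class (lowercase, DNS-label, immutable).
		Handler: "runsc",
		// overhead.podFixed: fixed per-pod resource cost charged on top of the containers' requests.
		Overhead: &nodev1.Overhead{
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
	}
}

func main() {
	rc := exampleRuntimeClass()
	fmt.Println(rc.Name, rc.Handler)
}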
var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 8ffad6973..54dbc0995 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,17 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. 
+ // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index b924cb421..74ecca26a 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,17 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -100,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. 
Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index fec4398b2..086105ecc 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index 0a1e010b9..a79e71028 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -116,8 +116,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 6aec30b89..45b9550f4 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -71,8 +71,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 582b28c15..799b0794a 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. 
So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 989b48458..16301c236 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -177,8 +177,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 863b2b873..1e6b075e3 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -69,8 +69,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index cebba07f4..266a9a853 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AllowedCSIDriver = map[string]string{ @@ -121,7 +121,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. 
This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 63aa4ed7b..370398198 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
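Editor's note (illustration, not part of the vendored diff): the policy/v1 and policy/v1beta1 hunks above only update documentation, promoting unhealthyPodEvictionPolicy from alpha to beta (feature gate PDBUnhealthyPodEvictionPolicy, now enabled by default). The Go surface is unchanged: it remains an optional pointer field on PodDisruptionBudgetSpec. A minimal sketch against the vendored k8s.io/api/policy/v1 types follows; the PDB name, namespace, label selector, and minAvailable value are assumed example values.

// Sketch only: sets the now beta-level unhealthyPodEvictionPolicy field documented above.
// Names and values are hypothetical.
package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// AlwaysAllow: not-yet-healthy Running pods may be evicted regardless of the budget.
	policy := policyv1.AlwaysAllow
	minAvailable := intstr.FromInt(2)

	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "web-pdb", Namespace: "default"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable:               &minAvailable,
			Selector:                   &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			UnhealthyPodEvictionPolicy: &policy,
		},
	}
	fmt.Println(pdb.Name, *pdb.Spec.UnhealthyPodEvictionPolicy)
}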
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 08578aba9..6708f3e58 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index db9525832..fff1fe40f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/resource/v1alpha1/doc.go b/vendor/k8s.io/api/resource/v1alpha2/doc.go similarity index 84% rename from vendor/k8s.io/api/resource/v1alpha1/doc.go rename to vendor/k8s.io/api/resource/v1alpha2/doc.go index 8fa577fab..d9c20e089 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha2/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha1 is the v1alpha1 version of the resource API. -package v1alpha1 // import "k8s.io/api/resource/v1alpha1" +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go similarity index 83% rename from vendor/k8s.io/api/resource/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/resource/v1alpha2/generated.pb.go index 632ad0425..2e8f9c724 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto -package v1alpha1 +package v1alpha2 import ( fmt "fmt" @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{0} + return fileDescriptor_3add37bbd52889e0, []int{0} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,15 +74,15 @@ func (m *AllocationResult) XXX_DiscardUnknown() { var xxx_messageInfo_AllocationResult proto.InternalMessageInfo -func (m *PodScheduling) Reset() { *m = PodScheduling{} } -func (*PodScheduling) ProtoMessage() {} -func (*PodScheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{1} +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{1} } -func (m *PodScheduling) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -90,27 +90,27 @@ func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *PodScheduling) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodScheduling.Merge(m, src) +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) } -func (m *PodScheduling) XXX_Size() int { +func (m *PodSchedulingContext) XXX_Size() int { return m.Size() } -func (m *PodScheduling) XXX_DiscardUnknown() { - xxx_messageInfo_PodScheduling.DiscardUnknown(m) +func (m *PodSchedulingContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) } -var xxx_messageInfo_PodScheduling proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo -func (m *PodSchedulingList) Reset() { *m = PodSchedulingList{} } -func (*PodSchedulingList) ProtoMessage() {} -func (*PodSchedulingList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{2} +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{2} } -func (m *PodSchedulingList) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -118,27 +118,27 @@ func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingList.Merge(m, src) +func (m 
*PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) } -func (m *PodSchedulingList) XXX_Size() int { +func (m *PodSchedulingContextList) XXX_Size() int { return m.Size() } -func (m *PodSchedulingList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingList.DiscardUnknown(m) +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingList proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo -func (m *PodSchedulingSpec) Reset() { *m = PodSchedulingSpec{} } -func (*PodSchedulingSpec) ProtoMessage() {} -func (*PodSchedulingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{3} +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{3} } -func (m *PodSchedulingSpec) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -146,27 +146,27 @@ func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingSpec.Merge(m, src) +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) } -func (m *PodSchedulingSpec) XXX_Size() int { +func (m *PodSchedulingContextSpec) XXX_Size() int { return m.Size() } -func (m *PodSchedulingSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingSpec.DiscardUnknown(m) +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingSpec proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo -func (m *PodSchedulingStatus) Reset() { *m = PodSchedulingStatus{} } -func (*PodSchedulingStatus) ProtoMessage() {} -func (*PodSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{4} +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{4} } -func (m *PodSchedulingStatus) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -174,22 +174,22 @@ func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *PodSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingStatus.Merge(m, src) +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) } -func (m *PodSchedulingStatus) XXX_Size() int { +func (m *PodSchedulingContextStatus) XXX_Size() int { return m.Size() } -func (m *PodSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingStatus.DiscardUnknown(m) +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingStatus proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{5} + return fileDescriptor_3add37bbd52889e0, []int{5} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{6} + return fileDescriptor_3add37bbd52889e0, []int{6} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{7} + return fileDescriptor_3add37bbd52889e0, []int{7} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } func (*ResourceClaimParametersReference) ProtoMessage() {} func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{8} + return fileDescriptor_3add37bbd52889e0, []int{8} } func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } func (*ResourceClaimSchedulingStatus) ProtoMessage() {} func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{9} + return fileDescriptor_3add37bbd52889e0, []int{9} } func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{10} + return fileDescriptor_3add37bbd52889e0, []int{10} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +357,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() 
([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{11} + return fileDescriptor_3add37bbd52889e0, []int{11} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +385,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{12} + return fileDescriptor_3add37bbd52889e0, []int{12} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +413,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{13} + return fileDescriptor_3add37bbd52889e0, []int{13} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +441,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{14} + return fileDescriptor_3add37bbd52889e0, []int{14} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +469,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourceClass) Reset() { *m = ResourceClass{} } func (*ResourceClass) ProtoMessage() {} func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{15} + return fileDescriptor_3add37bbd52889e0, []int{15} } func (m *ResourceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +497,7 @@ var xxx_messageInfo_ResourceClass proto.InternalMessageInfo func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } func (*ResourceClassList) ProtoMessage() {} func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{16} + return fileDescriptor_3add37bbd52889e0, []int{16} } func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +525,7 @@ var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } func (*ResourceClassParametersReference) ProtoMessage() {} func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{17} + return fileDescriptor_3add37bbd52889e0, []int{17} } func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,107 +550,140 @@ func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo +func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } +func (*ResourceHandle) ProtoMessage() {} +func (*ResourceHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{18} +} +func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandle.Merge(m, src) +} +func (m *ResourceHandle) XXX_Size() int { + return m.Size() +} +func (m *ResourceHandle) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo + func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha1.AllocationResult") - proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha1.PodScheduling") - proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingList") - proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingSpec") - proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha1.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassParametersReference") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") + proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), 
"k8s.io.api.resource.v1alpha2.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") + proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") + proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") + proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto", fileDescriptor_a66b2ee03d862be2) -} - -var fileDescriptor_a66b2ee03d862be2 = []byte{ - // 1174 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0x34, 0xc8, 0x8d, 0x5a, 0xdb, 0xec, 0xc9, - 0x12, 0xb0, 0xdb, 0x04, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, - 0x08, 0x42, 0x88, 0xf1, 0xee, 0xab, 0xbd, 0x64, 0xbf, 0xd8, 0xd9, 0x35, 0xaa, 0xb8, 0xf4, 0xca, - 0x0d, 0x21, 0xee, 0x1c, 0xf9, 0x43, 0x10, 0x52, 0x8e, 0x91, 0xe0, 0xd0, 0x93, 0x45, 0xcc, 0x81, - 0x3f, 0x80, 0x13, 0x3d, 0xa1, 0x19, 0xef, 0xae, 0x77, 0xd6, 0x1f, 0xc4, 0x11, 0x8a, 0xc2, 0x29, - 0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x8f, 0xee, 0x52, 0xd5, - 0xf2, 0xb4, 0xa3, 0xa8, 0x0d, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x07, 0xae, 0xe9, 0x05, 0x5a, 0x2c, - 0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6d, 0x11, 0xdb, 0xef, 0x92, 0x2d, - 0xad, 0x03, 0x2e, 0x04, 0x24, 0x04, 0x53, 0xf5, 0x03, 0x2f, 0xf4, 0xe4, 0x5b, 0x43, 0xb4, 0x4a, - 0x7c, 0x4b, 0x4d, 0xd0, 0x6a, 0x82, 0xde, 0x7c, 0xa5, 0x63, 0x85, 0xdd, 0xa8, 0xad, 0x1a, 0x9e, - 0xa3, 0x75, 0xbc, 0x8e, 0xa7, 0x71, 0xa5, 0x76, 0xf4, 0x98, 0xaf, 0xf8, 0x82, 0xff, 0x37, 0x24, - 0xdb, 0x54, 0x32, 0x47, 0x1b, 0x5e, 0xc0, 0x8e, 0xcd, 0x1f, 0xb8, 0xf9, 0xda, 0x08, 0xe3, 0x10, - 0xa3, 0x6b, 0xb9, 0x10, 0x3c, 0xd1, 0xfc, 0xa3, 0x0e, 0xdb, 0xa0, 0x9a, 0x03, 0x21, 0x99, 0xa4, - 0xa5, 0x4d, 0xd3, 0x0a, 0x22, 0x37, 0xb4, 0x1c, 0x18, 0x53, 0x78, 0xfd, 0xdf, 0x14, 0xa8, 0xd1, - 0x05, 0x87, 0xe4, 0xf5, 0x94, 0x3f, 0x25, 0xb4, 0xba, 0x63, 0xdb, 0x9e, 0x41, 0x42, 0xcb, 0x73, - 0x31, 0xd0, 0xc8, 0x0e, 0xe5, 0x7b, 0xa8, 0x9c, 0xc4, 0xe6, 0x7d, 0xe2, 0x9a, 0x36, 0x54, 0xa4, - 0xba, 0xd4, 0x58, 0xd6, 0x5f, 0x38, 0xee, 0xd7, 0x16, 0x06, 0xfd, 0x5a, 0x19, 0x0b, 0x52, 0x9c, - 0x43, 0xcb, 0x6d, 0xb4, 0x4a, 0x7a, 0xc4, 0xb2, 0x49, 0xdb, 0x86, 0x47, 0xee, 0xbe, 0x67, 0x02, - 0xad, 0x2c, 0xd6, 0xa5, 0x46, 0x69, 0xbb, 0xae, 0x66, 0xe2, 0xcf, 0x42, 0xa6, 0xf6, 0xb6, 0x54, - 0x06, 0x68, 0x81, 0x0d, 0x46, 0xe8, 0x05, 0xfa, 0x8d, 0x41, 0xbf, 0xb6, 0xba, 0x93, 0xd3, 0xc6, - 0x63, 0x7c, 0xb2, 0x86, 0x96, 0x69, 0x97, 0x04, 0xc0, 0xf6, 0x2a, 0x85, 0xba, 0xd4, 0x58, 0xd2, - 0xd7, 0x62, 0xf3, 0x96, 0x5b, 0x89, 0x00, 0x8f, 0x30, 0xca, 0x8f, 0x8b, 0x68, 0xa5, 0xe9, 0x99, - 0x2d, 0xa3, 0x0b, 0x66, 0x64, 0x5b, 0x6e, 0x47, 0xfe, 0x02, 0x2d, 0xb1, 0xf8, 0x9b, 0x24, 0x24, - 0xdc, 0xc1, 0xd2, 0xf6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 
0xff, 0xa8, 0xc3, 0x36, 0xa8, 0xca, - 0xd0, 0xcc, 0xe0, 0x47, 0xed, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, - 0xf6, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0x67, 0x5d, 0x3e, - 0x55, 0x30, 0xae, 0xe5, 0x83, 0xa1, 0x5f, 0x8b, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x88, - 0xae, 0xd2, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0xb6, 0xb7, 0xe6, 0x21, 0xe5, 0x8a, 0x7a, 0x39, - 0xa6, 0xbd, 0x3a, 0x5c, 0xe3, 0x98, 0x50, 0xf9, 0x59, 0x42, 0x6b, 0x02, 0xfe, 0x43, 0x8b, 0x86, - 0xf2, 0x67, 0x63, 0x51, 0x52, 0xcf, 0x16, 0x25, 0xa6, 0xcd, 0x63, 0xb4, 0x1a, 0x9f, 0xb7, 0x94, - 0xec, 0x64, 0x22, 0xd4, 0x44, 0x57, 0xac, 0x10, 0x1c, 0x76, 0x3f, 0x0a, 0x8d, 0xd2, 0xf6, 0x4b, - 0x73, 0x78, 0xa3, 0xaf, 0xc4, 0xbc, 0x57, 0xf6, 0x18, 0x03, 0x1e, 0x12, 0x29, 0xdf, 0xe6, 0xbd, - 0x60, 0xc1, 0x93, 0xef, 0xa2, 0x6b, 0x94, 0x5f, 0x31, 0x30, 0xd9, 0xfd, 0x89, 0x2f, 0xf4, 0x8d, - 0x98, 0xe1, 0x5a, 0x2b, 0x23, 0xc3, 0x02, 0x52, 0x7e, 0x13, 0x95, 0x7d, 0x2f, 0x04, 0x37, 0xb4, - 0x88, 0x9d, 0x5c, 0xe5, 0x42, 0x63, 0x59, 0x97, 0x59, 0x21, 0x34, 0x05, 0x09, 0xce, 0x21, 0x95, - 0xef, 0x25, 0xb4, 0x3e, 0x21, 0x03, 0xf2, 0x37, 0xa3, 0x02, 0xbb, 0x6f, 0x13, 0xcb, 0xa1, 0x15, - 0x89, 0xbb, 0xff, 0xd6, 0x6c, 0xf7, 0x71, 0x56, 0x67, 0x2c, 0xad, 0x63, 0xd5, 0x39, 0xa4, 0xc6, - 0xb9, 0xa3, 0x78, 0x21, 0x08, 0x90, 0xcb, 0x56, 0x08, 0xa2, 0x9b, 0xff, 0x51, 0x21, 0x88, 0xa4, - 0xb3, 0x0b, 0x61, 0x20, 0xa1, 0xaa, 0x80, 0xbf, 0xef, 0xb9, 0x34, 0x72, 0x20, 0xc0, 0xf0, 0x18, - 0x02, 0x70, 0x0d, 0x90, 0x5f, 0x46, 0x4b, 0xc4, 0xb7, 0x1e, 0x04, 0x5e, 0xe4, 0xc7, 0x77, 0x29, - 0xbd, 0xe5, 0x3b, 0xcd, 0x3d, 0xbe, 0x8f, 0x53, 0x04, 0x43, 0x27, 0x16, 0x71, 0x6b, 0x33, 0xe8, - 0xe4, 0x1c, 0x9c, 0x22, 0xe4, 0x3a, 0x2a, 0xba, 0xc4, 0x81, 0x4a, 0x91, 0x23, 0x53, 0xdf, 0xf7, - 0x89, 0x03, 0x98, 0x4b, 0x64, 0x1d, 0x15, 0x22, 0xcb, 0xac, 0x5c, 0xe1, 0x80, 0x3b, 0x31, 0xa0, - 0x70, 0xb0, 0xb7, 0xfb, 0xbc, 0x5f, 0x7b, 0x71, 0xda, 0x4b, 0x10, 0x3e, 0xf1, 0x81, 0xaa, 0x07, - 0x7b, 0xbb, 0x98, 0x29, 0xf3, 0x6a, 0x17, 0x9c, 0xbc, 0x74, 0xd5, 0x2e, 0x58, 0x37, 0xa5, 0xda, - 0x7f, 0x90, 0x50, 0x5d, 0xc0, 0x35, 0x49, 0x40, 0x1c, 0x08, 0x21, 0xa0, 0xe7, 0x4d, 0x56, 0x1d, - 0x15, 0x8f, 0x2c, 0xd7, 0xe4, 0x77, 0x35, 0x13, 0xfe, 0x0f, 0x2c, 0xd7, 0xc4, 0x5c, 0x92, 0x26, - 0xa8, 0x30, 0x2d, 0x41, 0xca, 0x53, 0x09, 0xdd, 0x9e, 0x59, 0xad, 0x29, 0x87, 0x34, 0x35, 0xc9, - 0xef, 0xa0, 0xeb, 0x91, 0x4b, 0x23, 0x2b, 0x64, 0xcf, 0x57, 0xb6, 0xf3, 0xac, 0x0f, 0xfa, 0xb5, - 0xeb, 0x07, 0xa2, 0x08, 0xe7, 0xb1, 0xca, 0x4f, 0x8b, 0xb9, 0xfc, 0xf2, 0x3e, 0xf8, 0x00, 0xad, - 0x65, 0xda, 0x01, 0xa5, 0xfb, 0x23, 0x1b, 0x6e, 0xc6, 0x36, 0x64, 0xb5, 0x86, 0x00, 0x3c, 0xae, - 0x23, 0x7f, 0x8d, 0x56, 0xfc, 0x6c, 0xa8, 0xe3, 0xd2, 0xbe, 0x37, 0x47, 0x4a, 0x27, 0xa4, 0x4a, - 0x5f, 0x1b, 0xf4, 0x6b, 0x2b, 0x82, 0x00, 0x8b, 0xe7, 0xc8, 0x4d, 0x54, 0x26, 0xe9, 0xc0, 0xf2, - 0x90, 0xf5, 0xf2, 0x61, 0x1a, 0x1a, 0x49, 0xfb, 0xdb, 0x11, 0xa4, 0xcf, 0xc7, 0x76, 0x70, 0x4e, - 0x5f, 0xf9, 0x6b, 0x11, 0xad, 0x4f, 0x68, 0x0f, 0xf2, 0x36, 0x42, 0x66, 0x60, 0xf5, 0x20, 0xc8, - 0x04, 0x29, 0x6d, 0x73, 0xbb, 0xa9, 0x04, 0x67, 0x50, 0xf2, 0xe7, 0x08, 0x8d, 0xd8, 0xe3, 0x98, - 0xa8, 0xb3, 0x63, 0x92, 0x1f, 0xbf, 0xf4, 0x32, 0xe3, 0xcf, 0xec, 0x66, 0x18, 0x65, 0x8a, 0x4a, - 0x01, 0x50, 0x08, 0x7a, 0x60, 0xbe, 0xe7, 0x05, 0x95, 0x02, 0xaf, 0xa3, 0xb7, 0xe7, 0x08, 0xfa, - 0x58, 0x2b, 0xd3, 0xd7, 0x63, 0x97, 0x4a, 0x78, 0x44, 0x8c, 0xb3, 0xa7, 0xc8, 0x2d, 0xb4, 0x61, - 0x02, 0xc9, 0x98, 0xf9, 0x55, 0x04, 0x34, 0x04, 0x93, 0x77, 0xa8, 0x25, 0xfd, 0x76, 
0x4c, 0xb0, - 0xb1, 0x3b, 0x09, 0x84, 0x27, 0xeb, 0x2a, 0xbf, 0x49, 0x68, 0x43, 0xb0, 0xec, 0x63, 0x70, 0x7c, - 0x9b, 0x84, 0x70, 0x01, 0xcf, 0xd1, 0xa1, 0xf0, 0x1c, 0xbd, 0x31, 0x47, 0xf8, 0x12, 0x23, 0xa7, - 0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x39, 0x51, 0xe3, 0x02, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, - 0xf5, 0x1c, 0x7e, 0x4d, 0x69, 0xb3, 0x27, 0xd3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, - 0x7f, 0x8b, 0x63, 0x10, 0xa5, 0x17, 0xe0, 0x86, 0xd8, 0x51, 0x16, 0xcf, 0xd4, 0x51, 0xc6, 0x1a, - 0x6d, 0x61, 0xce, 0x46, 0x4b, 0xe9, 0xf9, 0x1a, 0xed, 0x21, 0x5a, 0x11, 0x5f, 0x9f, 0xe2, 0x19, - 0x3f, 0xe1, 0x38, 0x75, 0x4b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x65, 0x9e, 0x3d, - 0x28, 0x9d, 0x52, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xc4, 0x38, 0x5f, 0xfc, 0xec, 0xc1, 0xbe, 0x8c, - 0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xfd, 0x44, 0x80, 0x47, 0x18, 0x5d, - 0x3f, 0x3e, 0xad, 0x2e, 0x9c, 0x9c, 0x56, 0x17, 0x9e, 0x9d, 0x56, 0x17, 0x9e, 0x0e, 0xaa, 0xd2, - 0xf1, 0xa0, 0x2a, 0x9d, 0x0c, 0xaa, 0xd2, 0xb3, 0x41, 0x55, 0xfa, 0x7d, 0x50, 0x95, 0xbe, 0xfb, - 0xa3, 0xba, 0xf0, 0xe9, 0xad, 0x59, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x0a, - 0x8b, 0x49, 0x9f, 0x11, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) +} + +var fileDescriptor_3add37bbd52889e0 = []byte{ + // 1233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x6e, 0x95, 0x4c, 0x1a, 0x37, 0xd9, 0xb6, 0xe0, 0x46, 0xad, 0x63, 0xf6, 0x14, + 0x89, 0xb2, 0xdb, 0x06, 0x54, 0x2a, 0xfe, 0x49, 0xd9, 0x06, 0x4a, 0x04, 0x4d, 0xc3, 0x98, 0x8a, + 0x16, 0x21, 0xd4, 0xc9, 0xee, 0xab, 0xbd, 0x64, 0xff, 0xb1, 0x33, 0x6b, 0xa8, 0xb8, 0xf4, 0x23, + 0xf4, 0xc0, 0x01, 0x4e, 0x1c, 0xf9, 0x02, 0x7c, 0x03, 0x84, 0xd4, 0x63, 0x11, 0x1c, 0x7a, 0xb2, + 0xa8, 0xf9, 0x08, 0x9c, 0xe8, 0x09, 0xcd, 0x78, 0x77, 0xbd, 0xb3, 0xf6, 0x9a, 0x38, 0x07, 0x0b, + 0x4e, 0xc9, 0xcc, 0xfb, 0xbd, 0xdf, 0xfb, 0x37, 0xef, 0xcd, 0xac, 0xd1, 0xbb, 0x87, 0xd7, 0xa8, + 0xee, 0x04, 0xc6, 0x61, 0x7c, 0x00, 0x91, 0x0f, 0x0c, 0xa8, 0xd1, 0x03, 0xdf, 0x0e, 0x22, 0x23, + 0x11, 0x90, 0xd0, 0x31, 0x22, 0xa0, 0x41, 0x1c, 0x59, 0x60, 0xf4, 0xae, 0x10, 0x37, 0xec, 0x92, + 0x2d, 0xa3, 0x03, 0x3e, 0x44, 0x84, 0x81, 0xad, 0x87, 0x51, 0xc0, 0x02, 0xf5, 0xc2, 0x10, 0xad, + 0x93, 0xd0, 0xd1, 0x53, 0xb4, 0x9e, 0xa2, 0xd7, 0x5f, 0xe9, 0x38, 0xac, 0x1b, 0x1f, 0xe8, 0x56, + 0xe0, 0x19, 0x9d, 0xa0, 0x13, 0x18, 0x42, 0xe9, 0x20, 0xbe, 0x2f, 0x56, 0x62, 0x21, 0xfe, 0x1b, + 0x92, 0xad, 0x6b, 0x39, 0xd3, 0x56, 0x10, 0x71, 0xb3, 0x45, 0x83, 0xeb, 0xaf, 0x8d, 0x30, 0x1e, + 0xb1, 0xba, 0x8e, 0x0f, 0xd1, 0x03, 0x23, 0x3c, 0xec, 0xf0, 0x0d, 0x6a, 0x78, 0xc0, 0xc8, 0x24, + 0x2d, 0xa3, 0x4c, 0x2b, 0x8a, 0x7d, 0xe6, 0x78, 0x30, 0xa6, 0x70, 0xf5, 0xdf, 0x14, 0xa8, 0xd5, + 0x05, 0x8f, 0x14, 0xf5, 0xb4, 0xef, 0x2a, 0x68, 0x75, 0xdb, 0x75, 0x03, 0x8b, 0x30, 0x27, 0xf0, + 0x31, 0xd0, 0xd8, 0x65, 0x6a, 0x80, 0x4e, 0xa7, 0xb9, 0x79, 0x9f, 0xf8, 0xb6, 0x0b, 0xb4, 0xa1, + 0xb4, 0xaa, 0x9b, 0xcb, 0x5b, 0x97, 0xf4, 0x69, 0xe9, 0xd3, 0xb1, 0xa4, 0x64, 0xbe, 0xf8, 0xb8, + 0xbf, 0xb1, 0x30, 0xe8, 0x6f, 0x9c, 0x96, 0xf7, 0x29, 0x2e, 0xb2, 0xab, 0x07, 0x68, 0x95, 0xf4, + 0x88, 0xe3, 0x92, 0x03, 0x17, 0x6e, 0xf9, 0x7b, 0x81, 0x0d, 0xb4, 0x51, 0x69, 0x29, 0x9b, 0xcb, + 0x5b, 0xad, 0xbc, 0x45, 0x9e, 0x63, 0xbd, 0x77, 0x45, 0xe7, 0x80, 0x36, 0xb8, 0x60, 0xb1, 0x20, + 0x32, 0xcf, 0x0e, 0xfa, 0x1b, 0xab, 
0xdb, 0x05, 0x6d, 0x3c, 0xc6, 0xa7, 0x1a, 0x68, 0x89, 0x76, + 0x49, 0x04, 0x7c, 0xaf, 0x51, 0x6d, 0x29, 0x9b, 0x8b, 0xe6, 0x5a, 0xe2, 0xe0, 0x52, 0x3b, 0x15, + 0xe0, 0x11, 0x46, 0xfb, 0xa9, 0x82, 0xce, 0xee, 0x07, 0x76, 0xdb, 0xea, 0x82, 0x1d, 0xbb, 0x8e, + 0xdf, 0xb9, 0x1e, 0xf8, 0x0c, 0xbe, 0x66, 0xea, 0x3d, 0xb4, 0xc8, 0xeb, 0x66, 0x13, 0x46, 0x1a, + 0x8a, 0xf0, 0xf2, 0x72, 0xce, 0xcb, 0x2c, 0xfd, 0x7a, 0x78, 0xd8, 0xe1, 0x1b, 0x54, 0xe7, 0x68, + 0xee, 0xf7, 0xad, 0x83, 0x2f, 0xc0, 0x62, 0x37, 0x81, 0x11, 0x53, 0x4d, 0x4c, 0xa3, 0xd1, 0x1e, + 0xce, 0x58, 0xd5, 0x3b, 0xa8, 0x46, 0x43, 0xb0, 0x92, 0x1c, 0x5c, 0x9d, 0x9e, 0xf5, 0x49, 0x3e, + 0xb6, 0x43, 0xb0, 0xcc, 0x53, 0x89, 0x8d, 0x1a, 0x5f, 0x61, 0xc1, 0xa8, 0xde, 0x43, 0x27, 0x29, + 0x23, 0x2c, 0xa6, 0x22, 0x05, 0xcb, 0x5b, 0xd7, 0x8e, 0xc1, 0x2d, 0xf4, 0xcd, 0x7a, 0xc2, 0x7e, + 0x72, 0xb8, 0xc6, 0x09, 0xaf, 0xf6, 0xab, 0x82, 0x1a, 0x93, 0xd4, 0x3e, 0x74, 0x28, 0x53, 0x3f, + 0x1b, 0x4b, 0x9d, 0x7e, 0xb4, 0xd4, 0x71, 0x6d, 0x91, 0xb8, 0xd5, 0xc4, 0xec, 0x62, 0xba, 0x93, + 0x4b, 0xdb, 0x27, 0xe8, 0x84, 0xc3, 0xc0, 0xe3, 0x67, 0x87, 0x9f, 0xd6, 0xad, 0xd9, 0x63, 0x33, + 0x57, 0x12, 0xfa, 0x13, 0xbb, 0x9c, 0x08, 0x0f, 0xf9, 0xb4, 0x47, 0x25, 0x31, 0xf1, 0xc4, 0xaa, + 0xd7, 0xd0, 0x29, 0x2a, 0x0e, 0x23, 0xd8, 0xfc, 0xa4, 0x89, 0xb8, 0x96, 0xcc, 0xb3, 0x09, 0xd1, + 0xa9, 0x76, 0x4e, 0x86, 0x25, 0xa4, 0xfa, 0x06, 0xaa, 0x87, 0x01, 0x03, 0x9f, 0x39, 0xc4, 0x4d, + 0x0f, 0x7d, 0x75, 0x73, 0xc9, 0x54, 0x07, 0xfd, 0x8d, 0xfa, 0xbe, 0x24, 0xc1, 0x05, 0xa4, 0xf6, + 0xbd, 0x82, 0xd6, 0xcb, 0xab, 0xa3, 0x7e, 0x83, 0xea, 0x69, 0xc4, 0xd7, 0x5d, 0xe2, 0x78, 0x69, + 0x07, 0xbf, 0x79, 0xb4, 0x0e, 0x16, 0x3a, 0x23, 0xee, 0xa4, 0xe4, 0x2f, 0x24, 0x31, 0xd5, 0x25, + 0x18, 0xc5, 0x05, 0x53, 0xda, 0x0f, 0x15, 0xb4, 0x22, 0x41, 0xe6, 0xd0, 0x32, 0x1f, 0x49, 0x2d, + 0x63, 0xcc, 0x12, 0x66, 0x59, 0xaf, 0xdc, 0x2d, 0xf4, 0xca, 0x95, 0x59, 0x48, 0xa7, 0x37, 0xc9, + 0x40, 0x41, 0x4d, 0x09, 0x7f, 0x3d, 0xf0, 0x69, 0xec, 0x41, 0x84, 0xe1, 0x3e, 0x44, 0xe0, 0x5b, + 0xa0, 0x5e, 0x42, 0x8b, 0x24, 0x74, 0x6e, 0x44, 0x41, 0x1c, 0x26, 0x47, 0x2a, 0x3b, 0xfa, 0xdb, + 0xfb, 0xbb, 0x62, 0x1f, 0x67, 0x08, 0x8e, 0x4e, 0x3d, 0x12, 0xde, 0xe6, 0xd0, 0xa9, 0x1d, 0x9c, + 0x21, 0xd4, 0x16, 0xaa, 0xf9, 0xc4, 0x83, 0x46, 0x4d, 0x20, 0xb3, 0xd8, 0xf7, 0x88, 0x07, 0x58, + 0x48, 0x54, 0x13, 0x55, 0x63, 0xc7, 0x6e, 0x9c, 0x10, 0x80, 0xcb, 0x09, 0xa0, 0x7a, 0x7b, 0x77, + 0xe7, 0x79, 0x7f, 0xe3, 0xa5, 0xb2, 0xbb, 0x86, 0x3d, 0x08, 0x81, 0xea, 0xb7, 0x77, 0x77, 0x30, + 0x57, 0xd6, 0x7e, 0x56, 0xd0, 0x9a, 0x14, 0xe4, 0x1c, 0x46, 0xc0, 0xbe, 0x3c, 0x02, 0x5e, 0x9e, + 0xa1, 0x64, 0x25, 0xbd, 0xff, 0xad, 0x82, 0x5a, 0x12, 0x6e, 0x9f, 0x44, 0xc4, 0x03, 0x06, 0x11, + 0x3d, 0x6e, 0xb1, 0x5a, 0xa8, 0x76, 0xe8, 0xf8, 0xb6, 0x38, 0xab, 0xb9, 0xf4, 0x7f, 0xe0, 0xf8, + 0x36, 0x16, 0x92, 0xac, 0x40, 0xd5, 0xb2, 0x02, 0x69, 0x0f, 0x15, 0x74, 0x71, 0x6a, 0xb7, 0x66, + 0x1c, 0x4a, 0x69, 0x91, 0xdf, 0x46, 0xa7, 0x63, 0x9f, 0xc6, 0x0e, 0xe3, 0xf7, 0x5d, 0x7e, 0x00, + 0x9d, 0xe1, 0xb7, 0xf6, 0x6d, 0x59, 0x84, 0x8b, 0x58, 0xed, 0xc7, 0x4a, 0xa1, 0xbe, 0x62, 0x1c, + 0xde, 0x40, 0x6b, 0xb9, 0x71, 0x40, 0xe9, 0xde, 0xc8, 0x87, 0xf3, 0x89, 0x0f, 0x79, 0xad, 0x21, + 0x00, 0x8f, 0xeb, 0xa8, 0x5f, 0xa1, 0x95, 0x30, 0x9f, 0xea, 0xa4, 0xb5, 0xdf, 0x99, 0xa1, 0xa4, + 0x13, 0x4a, 0x65, 0xae, 0x0d, 0xfa, 0x1b, 0x2b, 0x92, 0x00, 0xcb, 0x76, 0xd4, 0x7d, 0x54, 0x27, + 0xd9, 0x93, 0xe8, 0x26, 0x1f, 0xe9, 0xc3, 0x32, 0x6c, 0xa6, 0xe3, 0x6f, 0x5b, 0x92, 0x3e, 0x1f, + 0xdb, 0xc1, 0x05, 0x7d, 0xed, 0xaf, 0x0a, 0x3a, 0x33, 0x61, 
0x3c, 0xa8, 0x5b, 0x08, 0xd9, 0x91, + 0xd3, 0x83, 0x28, 0x97, 0xa4, 0x6c, 0xcc, 0xed, 0x64, 0x12, 0x9c, 0x43, 0xa9, 0x9f, 0x23, 0x34, + 0x62, 0x4f, 0x72, 0xa2, 0x4f, 0xcf, 0x49, 0xf1, 0x81, 0x67, 0xd6, 0x39, 0x7f, 0x6e, 0x37, 0xc7, + 0xa8, 0x52, 0xb4, 0x1c, 0x01, 0x85, 0xa8, 0x07, 0xf6, 0x7b, 0x41, 0xd4, 0xa8, 0x8a, 0x3e, 0x7a, + 0x6b, 0x86, 0xa4, 0x8f, 0x8d, 0x32, 0xf3, 0x4c, 0x12, 0xd2, 0x32, 0x1e, 0x11, 0xe3, 0xbc, 0x15, + 0xb5, 0x8d, 0xce, 0xd9, 0x40, 0x72, 0x6e, 0x7e, 0x19, 0x03, 0x65, 0x60, 0x8b, 0x09, 0xb5, 0x68, + 0x5e, 0x4c, 0x08, 0xce, 0xed, 0x4c, 0x02, 0xe1, 0xc9, 0xba, 0xda, 0xef, 0x0a, 0x3a, 0x27, 0x79, + 0xf6, 0x31, 0x78, 0xa1, 0x4b, 0x18, 0xcc, 0xe1, 0x3a, 0xba, 0x2b, 0x5d, 0x47, 0xaf, 0xcf, 0x90, + 0xbe, 0xd4, 0xc9, 0xb2, 0x6b, 0x49, 0xfb, 0x4d, 0x41, 0xe7, 0x27, 0x6a, 0xcc, 0x61, 0xbc, 0xde, + 0x91, 0xc7, 0xeb, 0xab, 0xc7, 0x88, 0xab, 0x64, 0xcc, 0x3e, 0x29, 0x8b, 0xaa, 0x3d, 0x7c, 0xb6, + 0xfe, 0xff, 0xde, 0x0f, 0xda, 0xdf, 0xf2, 0x33, 0x88, 0xd2, 0x39, 0x84, 0x21, 0x4f, 0x94, 0xca, + 0x91, 0x26, 0xca, 0xd8, 0xa0, 0xad, 0xce, 0x38, 0x68, 0x29, 0x3d, 0xde, 0xa0, 0xbd, 0x8b, 0x56, + 0xe4, 0xdb, 0xa7, 0x76, 0xc4, 0x6f, 0x3e, 0x41, 0xdd, 0x96, 0x6e, 0x27, 0x99, 0xa9, 0xf8, 0xf6, + 0xa0, 0xf4, 0xbf, 0xfc, 0xf6, 0xa0, 0xb4, 0xa4, 0x29, 0x7e, 0x91, 0xdf, 0x1e, 0x13, 0xf3, 0x3c, + 0xff, 0xb7, 0x07, 0xff, 0x94, 0xe6, 0x7f, 0x69, 0x48, 0xac, 0xf4, 0x0d, 0x99, 0x7d, 0x4a, 0xef, + 0xa5, 0x02, 0x3c, 0xc2, 0x68, 0xf7, 0x51, 0x5d, 0xfe, 0x0d, 0xe0, 0x58, 0x37, 0x5f, 0x0b, 0xd5, + 0x44, 0xe5, 0x0a, 0xae, 0xef, 0x10, 0x46, 0xb0, 0x90, 0x98, 0xe6, 0xe3, 0x67, 0xcd, 0x85, 0x27, + 0xcf, 0x9a, 0x0b, 0x4f, 0x9f, 0x35, 0x17, 0x1e, 0x0e, 0x9a, 0xca, 0xe3, 0x41, 0x53, 0x79, 0x32, + 0x68, 0x2a, 0x4f, 0x07, 0x4d, 0xe5, 0x8f, 0x41, 0x53, 0x79, 0xf4, 0x67, 0x73, 0xe1, 0xd3, 0x0b, + 0xd3, 0x7e, 0x31, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x67, 0xe4, 0xf6, 0x18, 0x69, 0x12, 0x00, + 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { @@ -693,15 +726,24 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0xa + if len(m.ResourceHandles) > 0 { + for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *PodScheduling) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -711,12 +753,12 @@ func (m *PodScheduling) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodScheduling) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -754,7 +796,7 @@ func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -764,12 +806,12 @@ func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingList) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -801,7 +843,7 @@ func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -811,12 +853,12 @@ func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -838,7 +880,7 @@ func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -848,12 +890,12 @@ func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1485,6 +1527,39 @@ func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } +func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -1502,8 +1577,12 @@ func (m *AllocationResult) Size() (n int) { } var l int _ = l - l = len(m.ResourceHandle) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceHandles) > 0 { + for _, e := range 
m.ResourceHandles { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.AvailableOnNodes != nil { l = m.AvailableOnNodes.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -1512,7 +1591,7 @@ func (m *AllocationResult) Size() (n int) { return n } -func (m *PodScheduling) Size() (n int) { +func (m *PodSchedulingContext) Size() (n int) { if m == nil { return 0 } @@ -1527,7 +1606,7 @@ func (m *PodScheduling) Size() (n int) { return n } -func (m *PodSchedulingList) Size() (n int) { +func (m *PodSchedulingContextList) Size() (n int) { if m == nil { return 0 } @@ -1544,7 +1623,7 @@ func (m *PodSchedulingList) Size() (n int) { return n } -func (m *PodSchedulingSpec) Size() (n int) { +func (m *PodSchedulingContextSpec) Size() (n int) { if m == nil { return 0 } @@ -1561,7 +1640,7 @@ func (m *PodSchedulingSpec) Size() (n int) { return n } -func (m *PodSchedulingStatus) Size() (n int) { +func (m *PodSchedulingContextStatus) Size() (n int) { if m == nil { return 0 } @@ -1794,6 +1873,19 @@ func (m *ResourceClassParametersReference) Size() (n int) { return n } +func (m *ResourceHandle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1804,54 +1896,59 @@ func (this *AllocationResult) String() string { if this == nil { return "nil" } + repeatedStringForResourceHandles := "[]ResourceHandle{" + for _, f := range this.ResourceHandles { + repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceHandles += "}" s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, + `ResourceHandles:` + repeatedStringForResourceHandles + `,`, `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, `}`, }, "") return s } -func (this *PodScheduling) String() string { +func (this *PodSchedulingContext) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodScheduling{`, + s := strings.Join([]string{`&PodSchedulingContext{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingSpec", "PodSchedulingSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingStatus", "PodSchedulingStatus", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *PodSchedulingList) String() string { +func (this *PodSchedulingContextList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodScheduling{" + repeatedStringForItems := "[]PodSchedulingContext{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodScheduling", "PodScheduling", 1), `&`, ``, 1) + "," + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingList{`, + s := strings.Join([]string{`&PodSchedulingContextList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *PodSchedulingSpec) String() string { +func (this *PodSchedulingContextSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodSchedulingSpec{`, + s := strings.Join([]string{`&PodSchedulingContextSpec{`, `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, `}`, }, "") return s } -func (this *PodSchedulingStatus) String() string { +func (this *PodSchedulingContextStatus) String() string { if this == nil { return "nil" } @@ -1860,7 +1957,7 @@ func (this *PodSchedulingStatus) String() string { repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," } repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingStatus{`, + s := strings.Join([]string{`&PodSchedulingContextStatus{`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") @@ -2040,6 +2137,17 @@ func (this *ResourceClassParametersReference) String() string { }, "") return s } +func (this *ResourceHandle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHandle{`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2079,9 +2187,9 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2091,23 +2199,25 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) + m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) + if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { @@ -2186,7 +2296,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodScheduling) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2209,10 +2319,10 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: PodScheduling: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodScheduling: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2335,7 +2445,7 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2358,10 +2468,10 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingList: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2426,7 +2536,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodScheduling{}) + m.Items = append(m.Items, PodSchedulingContext{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2452,7 +2562,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2475,10 +2585,10 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2566,7 +2676,7 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2589,10 +2699,10 @@ func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4507,6 +4617,120 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceHandle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto similarity index 79% rename from vendor/k8s.io/api/resource/v1alpha1/generated.proto rename to vendor/k8s.io/api/resource/v1alpha2/generated.proto index 2e814d155..02412398c 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -19,7 +19,7 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.resource.v1alpha1; +package k8s.io.api.resource.v1alpha2; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; @@ -27,23 +27,30 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha1"; +option go_package = "k8s.io/api/resource/v1alpha2"; -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. message AllocationResult { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. 
This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. + // + // +listType=atomic // +optional - optional string resourceHandle = 1; + repeated ResourceHandle resourceHandles = 1; - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -56,37 +63,37 @@ message AllocationResult { optional bool shareable = 3; } -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -message PodScheduling { +message PodSchedulingContext { // Standard object metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec describes where resources for the Pod are needed. - optional PodSchedulingSpec spec = 2; + optional PodSchedulingContextSpec spec = 2; // Status describes where resources for the Pod can be allocated. // +optional - optional PodSchedulingStatus status = 3; + optional PodSchedulingContextStatus status = 3; } -// PodSchedulingList is a collection of Pod scheduling objects. -message PodSchedulingList { +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { // Standard list metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of PodScheduling objects. - repeated PodScheduling items = 2; + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; } -// PodSchedulingSpec describes where resources for the Pod are needed. -message PodSchedulingSpec { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -105,8 +112,8 @@ message PodSchedulingSpec { repeated string potentialNodes = 2; } -// PodSchedulingStatus describes where resources for the Pod can be allocated. 
-message PodSchedulingStatus { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -235,9 +242,9 @@ message ResourceClaimStatus { // +optional optional string driverName = 1; - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional optional AllocationResult allocation = 2; @@ -370,3 +377,24 @@ message ResourceClassParametersReference { optional string namespace = 4; } +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +message ResourceHandle { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + optional string driverName = 1; + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + optional string data = 2; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha1/register.go b/vendor/k8s.io/api/resource/v1alpha2/register.go similarity index 95% rename from vendor/k8s.io/api/resource/v1alpha1/register.go rename to vendor/k8s.io/api/resource/v1alpha2/register.go index 8245b9aee..6e0d7ceb9 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/register.go +++ b/vendor/k8s.io/api/resource/v1alpha2/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodScheduling{}, - &PodSchedulingList{}, + &PodSchedulingContext{}, + &PodSchedulingContextList{}, ) // Add common types diff --git a/vendor/k8s.io/api/resource/v1alpha1/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go similarity index 81% rename from vendor/k8s.io/api/resource/v1alpha1/types.go rename to vendor/k8s.io/api/resource/v1alpha2/types.go index af5703840..21936bfe3 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -99,9 +99,9 @@ type ResourceClaimStatus struct { // +optional DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` @@ -133,21 +133,28 @@ type ResourceClaimStatus struct { // claim.status.reservedFor. const ResourceClaimReservedForMaxSize = 32 -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. type AllocationResult struct { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. 
+ // + // +listType=atomic // +optional - ResourceHandle string `json:"resourceHandle,omitempty" protobuf:"bytes,1,opt,name=resourceHandle"` + ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -160,8 +167,33 @@ type AllocationResult struct { Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` } -// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle. -const ResourceHandleMaxSize = 16 * 1024 +// AllocationResultResourceHandlesMaxSize represents the maximum number of +// entries in allocation.resourceHandles. +const AllocationResultResourceHandlesMaxSize = 32 + +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +type ResourceHandle struct { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. +const ResourceHandleDataMaxSize = 16 * 1024 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 @@ -181,28 +213,28 @@ type ResourceClaimList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -type PodScheduling struct { +type PodSchedulingContext struct { metav1.TypeMeta `json:",inline"` // Standard object metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec describes where resources for the Pod are needed. - Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"` + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` // Status describes where resources for the Pod can be allocated. 
// +optional - Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// PodSchedulingSpec describes where resources for the Pod are needed. -type PodSchedulingSpec struct { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -221,8 +253,8 @@ type PodSchedulingSpec struct { PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } -// PodSchedulingStatus describes where resources for the Pod can be allocated. -type PodSchedulingStatus struct { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -257,22 +289,22 @@ type ResourceClaimSchedulingStatus struct { } // PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodScheduling objects. This limit is part +// node lists that are stored in PodSchedulingContext objects. This limit is part // of the API. const PodSchedulingNodeListMaxSize = 128 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodSchedulingList is a collection of Pod scheduling objects. -type PodSchedulingList struct { +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of PodScheduling objects. - Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"` + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient diff --git a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go similarity index 76% rename from vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go index 6836dbfb6..474be8c85 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
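For orientation, the v1alpha2 types above replace the old single `resourceHandle` string with a `ResourceHandles` list of per-plugin entries. The following is a minimal sketch (not part of this patch) of how a resource driver's controller might populate the new field; the driver name and the JSON payload are placeholders, and only the field names and size constants come from the diff above.

```go
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

func main() {
	// Sketch only: populate AllocationResult.ResourceHandles the way a driver
	// controller would at allocation time. "example.com/gpu" and the Data
	// payload are hypothetical; Data is opaque to Kubernetes and is later
	// parsed by the kubelet plugin named in DriverName.
	alloc := resourcev1alpha2.AllocationResult{
		ResourceHandles: []resourcev1alpha2.ResourceHandle{
			{
				DriverName: "example.com/gpu",
				Data:       `{"deviceID":"gpu-0"}`,
			},
		},
		Shareable: true,
	}

	// The API caps the list at AllocationResultResourceHandlesMaxSize (32)
	// entries and each Data field at ResourceHandleDataMaxSize (16 KiB).
	fmt.Println(len(alloc.ResourceHandles), resourcev1alpha2.AllocationResultResourceHandlesMaxSize)
}
```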
var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributed of an allocated resource.", - "resourceHandle": "ResourceHandle contains arbitrary data returned by the driver after a successful allocation. This is opaque for Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource driver to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", + "": "AllocationResult contains attributes of an allocated resource.", + "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. 
If null, the resource is available everywhere.", "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", } @@ -38,44 +38,44 @@ func (AllocationResult) SwaggerDoc() map[string]string { return map_AllocationResult } -var map_PodScheduling = map[string]string{ - "": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "metadata": "Standard object metadata", "spec": "Spec describes where resources for the Pod are needed.", "status": "Status describes where resources for the Pod can be allocated.", } -func (PodScheduling) SwaggerDoc() map[string]string { - return map_PodScheduling +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext } -var map_PodSchedulingList = map[string]string{ - "": "PodSchedulingList is a collection of Pod scheduling objects.", +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", "metadata": "Standard list metadata", - "items": "Items is the list of PodScheduling objects.", + "items": "Items is the list of PodSchedulingContext objects.", } -func (PodSchedulingList) SwaggerDoc() map[string]string { - return map_PodSchedulingList +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList } -var map_PodSchedulingSpec = map[string]string{ - "": "PodSchedulingSpec describes where resources for the Pod are needed.", +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", } -func (PodSchedulingSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingSpec +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec } -var map_PodSchedulingStatus = map[string]string{ - "": "PodSchedulingStatus describes where resources for the Pod can be allocated.", +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", } -func (PodSchedulingStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingStatus +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus } var map_ResourceClaim = map[string]string{ @@ -146,7 +146,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource has been allocated successfully. If this is not specified, the resource is not yet allocated.", + "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", } @@ -219,4 +219,14 @@ func (ResourceClassParametersReference) SwaggerDoc() map[string]string { return map_ResourceClassParametersReference } +var map_ResourceHandle = map[string]string{ + "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", + "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. 
This may get increased in the future, but not reduced.", +} + +func (ResourceHandle) SwaggerDoc() map[string]string { + return map_ResourceHandle +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go similarity index 88% rename from vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go index c00fbfd1d..89d521bf0 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -29,6 +29,11 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { *out = *in + if in.ResourceHandles != nil { + in, out := &in.ResourceHandles, &out.ResourceHandles + *out = make([]ResourceHandle, len(*in)) + copy(*out, *in) + } if in.AvailableOnNodes != nil { in, out := &in.AvailableOnNodes, &out.AvailableOnNodes *out = new(v1.NodeSelector) @@ -48,7 +53,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -57,18 +62,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling. -func (in *PodScheduling) DeepCopy() *PodScheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { if in == nil { return nil } - out := new(PodScheduling) + out := new(PodSchedulingContext) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodScheduling) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -76,13 +81,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodScheduling, len(*in)) + *out = make([]PodSchedulingContext, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -90,18 +95,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList. 
-func (in *PodSchedulingList) DeepCopy() *PodSchedulingList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { if in == nil { return nil } - out := new(PodSchedulingList) + out := new(PodSchedulingContextList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingList) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -109,7 +114,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { *out = *in if in.PotentialNodes != nil { in, out := &in.PotentialNodes, &out.PotentialNodes @@ -119,18 +124,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec. -func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { if in == nil { return nil } - out := new(PodSchedulingSpec) + out := new(PodSchedulingContextSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { *out = *in if in.ResourceClaims != nil { in, out := &in.ResourceClaims, &out.ResourceClaims @@ -142,12 +147,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus. -func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { if in == nil { return nil } - out := new(PodSchedulingStatus) + out := new(PodSchedulingContextStatus) in.DeepCopyInto(out) return out } @@ -475,3 +480,19 @@ func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersR in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. 
+func (in *ResourceHandle) DeepCopy() *ResourceHandle { + if in == nil { + return nil + } + out := new(ResourceHandle) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index afc090777..c1a27e8ba 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -37,7 +37,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,7 +54,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 0f2989424..146bae40d 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -34,7 +34,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index ac34c531f..f167e1970 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5c60b7ab4..f0878fb16 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 7b0df4864..26ba8ff5d 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,7 +52,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index fa25f969c..557005db6 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 44b49ea24..43878184d 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index e315e1b35..6f88592cf 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,7 +56,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index cbc140f44..f42008eb9 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index d3c425c04..5f8eccaef 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -46,7 +46,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -79,16 +79,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -110,29 +109,27 @@ message CSIDriverSpec { optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. 
// // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +146,7 @@ message CSIDriverSpec { // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +156,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -193,7 +191,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +209,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -225,6 +224,7 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -234,7 +234,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -314,11 +314,11 @@ message CSINodeSpec { // the scheduler assumes that capacity is insufficient and tries some other // node. message CSIStorageCapacity { - // Standard object's metadata. 
The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -326,7 +326,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -335,7 +335,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -343,7 +343,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -355,7 +355,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -377,7 +377,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -394,36 +394,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. 
// +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -439,17 +439,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -466,11 +466,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -484,7 +484,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -493,7 +493,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. 
// +optional optional string persistentVolumeName = 1; @@ -509,39 +509,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -550,11 +550,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -563,7 +563,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. 
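Before the storage/v1 types.go hunks below, here is a small, hedged example of the StorageClass shape whose field comments were just reworded (provisioner, parameters, reclaimPolicy, mountOptions, allowVolumeExpansion, volumeBindingMode). The provisioner name and parameters are invented for illustration; only the field names and constants are taken from the API shown in this diff.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := corev1.PersistentVolumeReclaimDelete
	binding := storagev1.VolumeBindingWaitForFirstConsumer
	expand := true

	// Hypothetical StorageClass exercising the documented fields.
	sc := storagev1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "example-fast"},
		Provisioner:          "csi.example.com", // placeholder CSI driver name
		Parameters:           map[string]string{"type": "fast"},
		ReclaimPolicy:        &reclaim,            // Delete is the documented default
		MountOptions:         []string{"ro", "soft"}, // not validated; bad options fail at mount time
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &binding,
	}

	fmt.Printf("%s provisioned by %s (%s binding)\n", sc.Name, sc.Provisioner, *sc.VolumeBindingMode)
}
```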
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index f57099df6..c785f368e 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,41 +33,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -81,12 +82,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,11 +124,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -138,25 +140,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -165,7 +168,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -181,26 +184,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -209,11 +212,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -242,7 +245,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -279,16 +282,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -310,29 +312,27 @@ type CSIDriverSpec struct { PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -349,7 +349,7 @@ type CSIDriverSpec struct { // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -359,10 +359,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -382,7 +383,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -393,7 +394,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -411,6 +412,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -453,12 +455,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -502,6 +503,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -520,7 +522,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -557,7 +559,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -609,11 +611,12 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -621,7 +624,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -630,7 +633,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -638,7 +641,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -650,7 +653,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -670,12 +673,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 1a069bb40..c92a7f95a 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. 
This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. 
As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -106,11 +106,11 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", - "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index a53451226..88250a0f0 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -67,7 +67,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -76,7 +76,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -84,7 +84,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -96,7 +96,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -118,7 +118,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -134,11 +134,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -152,7 +152,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -161,7 +161,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. 
+ // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -177,39 +177,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -218,11 +218,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index fe8c9e3cd..59ef348a3 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. 
+ // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. 
+ // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -174,6 +175,7 @@ type VolumeError struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -186,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -195,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -203,7 +205,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -215,7 +217,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -238,12 +240,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. 
+ // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index a228a3fec..ba6afbd59 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. 
Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,10 +93,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -105,8 +105,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index bedbd3183..2b354dd47 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -49,7 +49,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -82,16 +82,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -130,10 +129,9 @@ message CSIDriverSpec { // +optional repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +147,7 @@ message CSIDriverSpec { // +optional optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +157,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +181,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. 
// @@ -193,7 +192,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +210,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -236,7 +236,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -327,7 +327,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -336,7 +336,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -344,7 +344,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -356,7 +356,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -378,7 +378,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -395,36 +395,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. 
// +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -440,17 +440,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -467,11 +467,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -485,7 +485,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -494,7 +494,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. 
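
[Editorial aside, not part of the vendored diff: a hedged sketch of a VolumeAttachment built from these v1beta1 types. The spec names the attacher, the PersistentVolume source, and the target node; the status is intended to be set only by the external-attacher. The driver, volume, node, and metadata values are placeholders.]

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001" // hypothetical PersistentVolume name

	va := storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-abc123"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			// attacher must match the driver name returned by GetPluginName().
			Attacher: "csi.example.com",
			Source:   storagev1beta1.VolumeAttachmentSource{PersistentVolumeName: &pvName},
			NodeName: "node-1",
		},
	}

	// Only the entity completing the attach operation (the external-attacher)
	// is supposed to fill in the status.
	va.Status = storagev1beta1.VolumeAttachmentStatus{
		Attached:           true,
		AttachmentMetadata: map[string]string{"devicePath": "/dev/xvdf"}, // driver-specific
	}

	fmt.Printf("%s attached=%v\n", va.Name, va.Status.Attached)
}
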
message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -510,39 +510,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -551,11 +551,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -564,7 +564,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. 
// If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index f4d09b641..4c39b49cc 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +88,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
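
[Editorial aside, not part of the vendored diff: a short, hypothetical StorageClass using the v1beta1 fields documented above (provisioner, parameters, reclaimPolicy, mountOptions, allowVolumeExpansion, volumeBindingMode). The class name "fast" and the parameter values are made up for illustration.]

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := v1.PersistentVolumeReclaimDelete
	binding := storagev1beta1.VolumeBindingWaitForFirstConsumer
	expand := true

	sc := storagev1beta1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "fast"}, // hypothetical class name
		Provisioner: "csi.example.com",
		// Opaque, provisioner-specific parameters.
		Parameters:    map[string]string{"type": "ssd"},
		ReclaimPolicy: &reclaim,
		// Not validated; mount of the PVs simply fails if an option is invalid.
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &expand,
		// Delay binding until a pod using the claim is scheduled.
		VolumeBindingMode: &binding,
	}
	fmt.Println(sc.Name, sc.Provisioner)
}
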
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +132,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +151,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +179,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +195,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +223,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +262,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,16 +302,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -329,14 +331,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -347,11 +349,9 @@ type CSIDriverSpec struct { // +optional VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -367,7 +367,7 @@ type CSIDriverSpec struct { // +optional StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -377,10 +377,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -400,7 +401,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -411,7 +412,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -429,6 +430,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -466,12 +468,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -539,7 +540,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -575,7 +576,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. @@ -634,6 +635,7 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -646,7 +648,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -655,7 +657,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -663,7 +665,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -675,7 +677,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -698,12 +700,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ea3c1e4c2..0f2718b9c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. 
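
[Editorial aside, not part of the vendored diff: a hedged sketch of the kind of CSIStorageCapacity object a driver deployment might publish for capacity-aware scheduling, using the v1beta1 fields shown above. The namespace, topology label, storage class name, and sizes are placeholders.]

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	capacity := resource.MustParse("100Gi")
	maxSize := resource.MustParse("10Gi")

	csc := storagev1beta1.CSIStorageCapacity{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "csisc-example", // generated names are recommended in practice
			Namespace: "kube-system",
		},
		// Only nodes matching this selector can reach the reported storage.
		NodeTopology: &metav1.LabelSelector{
			MatchLabels: map[string]string{"topology.kubernetes.io/zone": "us-east1"},
		},
		StorageClassName:  "fast", // must reference an existing StorageClass
		Capacity:          &capacity,
		MaximumVolumeSize: &maxSize,
	}
	fmt.Println(csc.StorageClassName, csc.Capacity.String())
}
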
Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. 
In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". 
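Illustrative sketch, not part of the vendored diff: the tokenRequests documentation above says kubelet passes tokens under the "csi.storage.k8s.io/serviceAccount.tokens" VolumeContext key as JSON keyed by audience, with "token" and "expirationTimestamp" fields. The helper name and handler shape below are assumptions; only the key and JSON field names come from the documentation.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// serviceAccountToken mirrors the per-audience JSON value described in the
// tokenRequests documentation above (field names "token" and "expirationTimestamp").
type serviceAccountToken struct {
	Token               string    `json:"token"`
	ExpirationTimestamp time.Time `json:"expirationTimestamp"`
}

// parseServiceAccountTokens is a hypothetical helper a driver might call from its
// NodePublishVolume handler with the request's VolumeContext map.
func parseServiceAccountTokens(volumeContext map[string]string) (map[string]serviceAccountToken, error) {
	raw, ok := volumeContext["csi.storage.k8s.io/serviceAccount.tokens"]
	if !ok {
		return nil, fmt.Errorf("no service account tokens in volume context")
	}
	tokens := map[string]serviceAccountToken{} // keyed by audience
	if err := json.Unmarshal([]byte(raw), &tokens); err != nil {
		return nil, err
	}
	return tokens, nil
}

func main() {
	ctx := map[string]string{
		"csi.storage.k8s.io/serviceAccount.tokens": `{"csi.example.com":{"token":"redacted","expirationTimestamp":"2023-01-01T00:00:00Z"}}`,
	}
	tokens, err := parseServiceAccountTokens(ctx)
	fmt.Println(tokens["csi.example.com"].ExpirationTimestamp, err)
}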
This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -107,10 +107,10 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go new file mode 100644 index 000000000..ea1dc377b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index eba37bafd..faff51a04 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -37,6 +37,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -139,3 +140,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index d0691de10..613856bac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -36,6 +36,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -130,3 +131,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
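Illustrative sketch, not part of the vendored diff: how the MatchCondition apply configuration and the WithMatchConditions setter added above might be used together. MatchCondition(), WithName, WithExpression and WithMatchConditions come from the generated code in this diff; the MutatingWebhook() constructor is the package's standard generated constructor, and the webhook name and CEL expression are placeholders.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	// Build a webhook apply configuration that only matches requests not made by nodes.
	webhook := admissionregistrationv1.MutatingWebhook().
		WithName("pod-defaults.example.com").
		WithMatchConditions(
			admissionregistrationv1.MatchCondition().
				WithName("exclude-node-requests").
				WithExpression("!('system:nodes' in request.userInfo.groups)"),
		)

	fmt.Println(*webhook.Name, len(webhook.MatchConditions))
}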
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go deleted file mode 100644 index 4936110fb..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// AdmissionPolicySpecApplyConfiguration represents an declarative configuration of the AdmissionPolicySpec type for use -// with apply. -type AdmissionPolicySpecApplyConfiguration struct { - ParamSource *ParamSourceApplyConfiguration `json:"paramSource,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` - Validations []ValidationApplyConfiguration `json:"validations,omitempty"` - FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` -} - -// AdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the AdmissionPolicySpec type for use with -// apply. -func AdmissionPolicySpec() *AdmissionPolicySpecApplyConfiguration { - return &AdmissionPolicySpecApplyConfiguration{} -} - -// WithParamSource sets the ParamSource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParamSource field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithParamSource(value *ParamSourceApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.ParamSource = value - return b -} - -// WithMatchResources sets the MatchResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MatchResources field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.MatchResources = value - return b -} - -// WithValidations adds the given value to the Validations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Validations field. 
-func (b *AdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithValidations") - } - b.Validations = append(b.Validations, *values[i]) - } - return b -} - -// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FailurePolicy field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *AdmissionPolicySpecApplyConfiguration { - b.FailurePolicy = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go new file mode 100644 index 000000000..023695139 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. 
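Illustrative sketch, not part of the vendored diff: building one of the new v1alpha1 AuditAnnotation apply configurations added above. AuditAnnotation(), WithKey and WithValueExpression come from the generated code in this diff; the key and CEL value expression are placeholders.

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

func main() {
	annotation := admissionregistrationv1alpha1.AuditAnnotation().
		WithKey("high-replica-count").
		WithValueExpression("'replicas=' + string(object.spec.replicas)")

	fmt.Println(*annotation.Key, *annotation.ValueExpression)
}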
+func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go new file mode 100644 index 000000000..f8b511f51 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go rename to vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index a7a5a6af8..186c750f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,31 +18,31 @@ limitations under the License. package v1alpha1 -// ParamSourceApplyConfiguration represents an declarative configuration of the ParamSource type for use +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use // with apply. 
-type ParamSourceApplyConfiguration struct { - APIVersion *string `json:"apiVersion,omitempty"` - Kind *string `json:"kind,omitempty"` +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` } -// ParamSourceApplyConfiguration constructs an declarative configuration of the ParamSource type for use with +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with // apply. -func ParamSource() *ParamSourceApplyConfiguration { - return &ParamSourceApplyConfiguration{} +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} } -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithAPIVersion(value string) *ParamSourceApplyConfiguration { - b.APIVersion = &value +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value return b } -// WithKind sets the Kind field in the declarative configuration to the given value +// WithExpression sets the Expression field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithKind(value string) *ParamSourceApplyConfiguration { - b.Kind = &value +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go deleted file mode 100644 index 313de9d5f..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. 
-type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1alpha1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1alpha1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go deleted file mode 100644 index 112f4826b..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. 
-func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go new file mode 100644 index 000000000..42a917071 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. 
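Illustrative sketch, not part of the vendored diff: the new TypeChecking and ExpressionWarning apply configurations added above are status-side types that the API server normally populates; building them by hand is mainly useful in tests. All builders used here (TypeChecking, ExpressionWarning, WithExpressionWarnings, WithFieldRef, WithWarning) appear in this diff; the field reference and warning text are placeholders.

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

func main() {
	tc := admissionregistrationv1alpha1.TypeChecking().
		WithExpressionWarnings(
			admissionregistrationv1alpha1.ExpressionWarning().
				WithFieldRef("spec.validations[0].expression").
				WithWarning("no matching type for field 'replicas'"),
		)

	fmt.Println(len(tc.ExpressionWarnings))
}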
+func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 3a23e0c72..c860b85cf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -32,7 +32,8 @@ import ( type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } // ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with @@ -245,3 +246,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *Validating b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index f06f65549..c9a4ff7ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -18,12 +18,17 @@ limitations under the License. package v1alpha1 +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + // ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. 
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { - PolicyName *string `json:"policyName,omitempty"` - ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } // ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with @@ -55,3 +60,13 @@ func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResour b.MatchResources = value return b } + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1alpha1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index cba1e720c..f674b5b1e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -29,6 +29,8 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` Validations []ValidationApplyConfiguration `json:"validations,omitempty"` FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with @@ -73,3 +75,29 @@ func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(valu b.FailurePolicy = &value return b } + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..821184c8a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 43916603b..9a5fc8475 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -25,9 +25,10 @@ import ( // ValidationApplyConfiguration represents an declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { - Expression *string `json:"expression,omitempty"` - Message *string `json:"message,omitempty"` - Reason *v1.StatusReason `json:"reason,omitempty"` + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` } // ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with @@ -59,3 +60,11 @@ func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *Valida b.Reason = &value return b } + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go new file mode 100644 index 000000000..d099b6b6e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index cc48d3b6f..54845341f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -38,6 +38,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1beta1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -140,3 +141,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b998..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. 
-func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go deleted file mode 100644 index 0fd5dd34d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. 
-func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 84479b5db..8c5c341ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -37,6 +37,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -131,3 +132,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go deleted file mode 100644 index 86601cc48..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v2 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use -// with apply. -type PodResourceMetricSourceApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` - Target *MetricTargetApplyConfiguration `json:"target,omitempty"` -} - -// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with -// apply. -func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { - return &PodResourceMetricSourceApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { - b.Name = &value - return b -} - -// WithTarget sets the Target field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Target field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { - b.Target = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go rename to vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 44890c2d9..788d2a07d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1alpha1 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,66 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingApplyConfiguration represents an declarative configuration of the PodScheduling type for use +// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use // with apply. -type PodSchedulingApplyConfiguration struct { +type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingStatusApplyConfiguration `json:"status,omitempty"` + Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// PodScheduling constructs an declarative configuration of the PodScheduling type for use with +// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with // apply. -func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { - b := &PodSchedulingApplyConfiguration{} +func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { + b := &ClusterTrustBundleApplyConfiguration{} b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b } -// ExtractPodScheduling extracts the applied configuration owned by fieldManager from -// podScheduling. If no managedFields are found in podScheduling for fieldManager, a -// PodSchedulingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from +// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a +// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podScheduling must be a unmodified PodScheduling API object that was retrieved from the Kubernetes API. -// ExtractPodScheduling provides a way to perform a extract/modify-in-place/apply workflow. +// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API. +// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "") +func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "") } -// ExtractPodSchedulingStatus is the same as ExtractPodScheduling except +// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "status") +func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status") } -func extractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { - b := &PodSchedulingApplyConfiguration{} - err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha1.PodScheduling"), fieldManager, b, subresource) +func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) { + b := &ClusterTrustBundleApplyConfiguration{} + err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podScheduling.Name) - b.WithNamespace(podScheduling.Namespace) + b.WithName(clusterTrustBundle.Name) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { b.Kind = &value return b } @@ -94,7 +91,7 @@ func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingA // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { b.APIVersion = &value return b } @@ -102,7 +99,7 @@ func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSched // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -111,7 +108,7 @@ func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingA // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -120,7 +117,7 @@ func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSch // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -129,7 +126,7 @@ func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedu // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -138,7 +135,7 @@ func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulin // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -147,7 +144,7 @@ func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *Pod // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -156,7 +153,7 @@ func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedu // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -165,7 +162,7 @@ func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Tim // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -174,7 +171,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -184,7 +181,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value i // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -199,7 +196,7 @@ func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -213,7 +210,7 @@ func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -227,7 +224,7 @@ func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.Owne // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -235,7 +232,7 @@ func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodS return b } -func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ClusterTrustBundleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -244,15 +241,7 @@ func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExis // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithSpec(value *PodSchedulingSpecApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundleSpecApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithStatus(value *PodSchedulingStatusApplyConfiguration) *PodSchedulingApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go new file mode 100644 index 000000000..d1aea1d6d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// with apply. +type ClusterTrustBundleSpecApplyConfiguration struct { + SignerName *string `json:"signerName,omitempty"` + TrustBundle *string `json:"trustBundle,omitempty"` +} + +// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// apply. 
+func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { + return &ClusterTrustBundleSpecApplyConfiguration{} +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.SignerName = &value + return b +} + +// WithTrustBundle sets the TrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrustBundle field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithTrustBundle(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.TrustBundle = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index d3b066d9c..9ada59ee2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -25,28 +25,29 @@ import ( // ContainerApplyConfiguration represents an declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts 
[]VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with @@ -146,6 +147,19 @@ func (b *ContainerApplyConfiguration) WithResources(value *ResourceRequirementsA return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *ContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go new file mode 100644 index 000000000..bbbcbc9f1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// with apply. +type ContainerResizePolicyApplyConfiguration struct { + ResourceName *v1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` +} + +// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// apply. 
+func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { + return &ContainerResizePolicyApplyConfiguration{} +} + +// WithResourceName sets the ResourceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceName field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { + b.ResourceName = &value + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { + b.RestartPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 18d2925c1..2b98c4658 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -18,18 +18,24 @@ limitations under the License. package v1 +import ( + corev1 "k8s.io/api/core/v1" +) + // ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use // with apply. type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } // ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with @@ -109,3 +115,19 @@ func (b *ContainerStatusApplyConfiguration) WithStarted(value bool) *ContainerSt b.Started = &value return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *ContainerStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *ContainerStatusApplyConfiguration { + b.Resources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 6c24cd419..c51049ba1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -126,6 +126,19 @@ func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequ return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 67e658cfa..764b830e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -25,28 +25,29 @@ import ( // EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use // with apply. 
type EphemeralContainerCommonApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with @@ -146,6 +147,19 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResources(value *Resour return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. 
+func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerCommonApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 7ee5b9955..e9d8e5b28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -39,6 +39,7 @@ type PodStatusApplyConfiguration struct { ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Resize *v1.PodResizeStatus `json:"resize,omitempty"` } // PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with @@ -175,3 +176,11 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* } return b } + +// WithResize sets the Resize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resize field is set to the value of the last call. 
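Reviewer note on the core/v1 additions above: ContainerResizePolicy, the WithResizePolicy appenders, the new ContainerStatus fields and PodStatus.Resize all come from the in-place pod resize feature that is alpha in Kubernetes 1.27. A minimal sketch of how a caller might use the new builders with server-side apply; the clientset variable, namespace, image and field manager name are placeholders, not anything this patch prescribes.

package example

import (
	"context"
	"fmt"

	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyResizablePod applies a single-container pod whose CPU request may be
// resized in place without restarting the container.
func applyResizablePod(ctx context.Context, cs kubernetes.Interface) error {
	pod := applycorev1.Pod("demo", "default").
		WithSpec(applycorev1.PodSpec().
			WithContainers(applycorev1.Container().
				WithName("app").
				WithImage("nginx:1.25").
				WithResources(applycorev1.ResourceRequirements().
					WithRequests(corev1api.ResourceList{
						corev1api.ResourceCPU: resource.MustParse("250m"),
					})).
				// New in client-go v0.27: per-resource resize policy.
				// The typed string conversion avoids asserting a particular
				// constant name for the restart policy value.
				WithResizePolicy(applycorev1.ContainerResizePolicy().
					WithResourceName(corev1api.ResourceCPU).
					WithRestartPolicy(corev1api.ResourceResizeRestartPolicy("NotRequired")))))

	if _, err := cs.CoreV1().Pods("default").Apply(ctx, pod, metav1.ApplyOptions{FieldManager: "example-manager"}); err != nil {
		return fmt.Errorf("apply pod: %w", err)
	}
	return nil
}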
+func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { + b.Resize = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index db376b941..493af6fb3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -35,7 +35,7 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` @@ -43,7 +43,7 @@ type ServiceSpecApplyConfiguration struct { IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` } // ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with @@ -152,7 +152,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -218,7 +218,7 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. 
-func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go deleted file mode 100644 index 27b49bf15..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use -// with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with -// apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724cf..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. 
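Reviewer note on the servicespec.go hunks above: the builders move from the deprecated ServiceExternalTrafficPolicyType/ServiceInternalTrafficPolicyType names to the ServiceExternalTrafficPolicy/ServiceInternalTrafficPolicy types introduced in k8s.io/api v0.27, so any caller that passes explicitly typed values must switch type names. A hedged sketch; typed string conversions are used rather than asserting particular constant names.

package example

import (
	corev1api "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// trafficPolicies builds a ServiceSpec apply configuration using the renamed
// policy types; "Local" and "Cluster" mirror the API's string enum values.
func trafficPolicies() *applycorev1.ServiceSpecApplyConfiguration {
	return applycorev1.ServiceSpec().
		WithExternalTrafficPolicy(corev1api.ServiceExternalTrafficPolicy("Local")).
		WithInternalTrafficPolicy(corev1api.ServiceInternalTrafficPolicy("Cluster"))
}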
-type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. 
-func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6af..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c7968813..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f7658..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. 
-func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc9..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. -type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType 
`json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85f..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675a..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. 
-type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a73..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. 
-func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec4313812..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 94dd2160d..361b2f4e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,17 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -55,6 +66,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -167,6 +186,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -225,6 +252,39 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1alpha1.MatchResources map: fields: @@ -307,6 +367,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking + map: + fields: + - name: expressionWarnings + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy map: fields: @@ -324,6 +393,10 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec 
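Reviewer note on the internal.go schema additions above: the new io.k8s.api.admissionregistration.v1.MatchCondition entries correspond to the matchConditions field added to v1 admission webhooks in Kubernetes 1.27 (alpha, behind the AdmissionWebhookMatchConditions gate). Assuming the generated builders in applyconfigurations/admissionregistration/v1 follow the usual applyconfiguration-gen pattern (a MatchCondition constructor plus a WithMatchConditions appender), attaching a CEL match condition could look roughly like the sketch below; the condition name and expression are purely illustrative.

package example

import (
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// withNodeExclusion adds a CEL match condition so the webhook is only
// consulted for requests that do not come from node users.
func withNodeExclusion(w *applyadmv1.ValidatingWebhookApplyConfiguration) *applyadmv1.ValidatingWebhookApplyConfiguration {
	return w.WithMatchConditions(applyadmv1.MatchCondition().
		WithName("exclude-nodes").
		WithExpression("!('system:nodes' in request.userInfo.groups)"))
}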
default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + default: {} - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding map: fields: @@ -353,12 +426,32 @@ var schemaYAML = typed.YAMLObject(`types: - name: policyName type: scalar: string + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec map: fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + elementRelationship: atomic - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchConstraints type: namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources @@ -371,6 +464,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.admissionregistration.v1alpha1.Validation elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration + type: + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.TypeChecking - name: io.k8s.api.admissionregistration.v1alpha1.Validation map: fields: @@ -381,9 +491,23 @@ var schemaYAML = typed.YAMLObject(`types: - name: message type: scalar: string + - name: messageExpression + type: + scalar: string - name: reason type: scalar: string +- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook map: fields: @@ -400,6 +524,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -482,6 +614,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -3502,6 +3642,33 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundle + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + default: {} +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + map: + fields: + - name: signerName + type: + scalar: string + - name: trustBundle + type: + scalar: string + default: "" - name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest map: fields: @@ 
-4129,6 +4296,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -4205,6 +4378,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ContainerState map: fields: @@ -4263,6 +4447,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ContainerStatus map: fields: + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: containerID type: scalar: string @@ -4286,6 +4475,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: boolean default: false + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartCount type: scalar: numeric @@ -4521,6 +4713,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -6185,6 +6383,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: reason type: scalar: string + - name: resize + type: + scalar: string - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -7734,29 +7935,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -7992,18 +8170,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: @@ -8026,28 +8192,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.extensions.v1beta1.IPBlock map: fields: @@ -8293,135 +8437,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy - map: - fields: - - name: 
apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: @@ -8531,66 +8546,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: 
allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod map: fields: @@ -10270,6 +10225,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 +- name: io.k8s.api.networking.v1alpha1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: uid + type: + scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -11509,19 +11505,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha1.AllocationResult +- name: io.k8s.api.resource.v1alpha2.AllocationResult map: fields: - name: availableOnNodes type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandle + - name: resourceHandles type: - scalar: string + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + elementRelationship: atomic - name: shareable type: scalar: boolean -- name: io.k8s.api.resource.v1alpha1.PodScheduling +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext map: fields: - name: apiVersion @@ -11536,13 +11535,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingSpec + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus default: {} -- name: io.k8s.api.resource.v1alpha1.PodSchedulingSpec +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec map: fields: - name: potentialNodes @@ -11554,18 +11553,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha1.PodSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha1.ResourceClaim +- name: io.k8s.api.resource.v1alpha2.ResourceClaim map: fields: - name: apiVersion @@ -11580,13 +11579,13 @@ var 
schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference map: fields: - name: apiGroup @@ -11604,7 +11603,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference map: fields: - name: apiGroup @@ -11618,7 +11617,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus map: fields: - name: name @@ -11630,7 +11629,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: - name: allocationMode @@ -11638,17 +11637,17 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - name: resourceClassName type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha1.AllocationResult + namedType: io.k8s.api.resource.v1alpha2.AllocationResult - name: deallocationRequested type: scalar: boolean @@ -11659,11 +11658,11 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate map: fields: - name: apiVersion @@ -11678,9 +11677,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -11689,9 +11688,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClass +- name: io.k8s.api.resource.v1alpha2.ResourceClass map: fields: - name: apiVersion @@ -11710,11 +11709,11 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - name: suitableNodes type: namedType: io.k8s.api.core.v1.NodeSelector -- name: 
io.k8s.api.resource.v1alpha1.ResourceClassParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference map: fields: - name: apiGroup @@ -11731,6 +11730,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string +- name: io.k8s.api.resource.v1alpha2.ResourceHandle + map: + fields: + - name: data + type: + scalar: string + - name: driverName + type: + scalar: string - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go deleted file mode 100644 index f400e5164..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// GroupVersionKindApplyConfiguration represents an declarative configuration of the GroupVersionKind type for use -// with apply. -type GroupVersionKindApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Kind *string `json:"kind,omitempty"` -} - -// GroupVersionKindApplyConfiguration constructs an declarative configuration of the GroupVersionKind type for use with -// apply. -func GroupVersionKind() *GroupVersionKindApplyConfiguration { - return &GroupVersionKindApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithGroup(value string) *GroupVersionKindApplyConfiguration { - b.Group = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithVersion(value string) *GroupVersionKindApplyConfiguration { - b.Version = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. 
-func (b *GroupVersionKindApplyConfiguration) WithKind(value string) *GroupVersionKindApplyConfiguration { - b.Kind = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go deleted file mode 100644 index 5cadee335..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use -// with apply. -type ListMetaApplyConfiguration struct { - SelfLink *string `json:"selfLink,omitempty"` - ResourceVersion *string `json:"resourceVersion,omitempty"` - Continue *string `json:"continue,omitempty"` - RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` -} - -// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with -// apply. -func ListMeta() *ListMetaApplyConfiguration { - return &ListMetaApplyConfiguration{} -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration { - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration { - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration { - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. 
-func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration { - b.RemainingItemCount = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go deleted file mode 100644 index 7db432089..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusApplyConfiguration represents an declarative configuration of the Status type for use -// with apply. -type StatusApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - *ListMetaApplyConfiguration `json:"metadata,omitempty"` - Status *string `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Reason *metav1.StatusReason `json:"reason,omitempty"` - Details *StatusDetailsApplyConfiguration `json:"details,omitempty"` - Code *int32 `json:"code,omitempty"` -} - -// StatusApplyConfiguration constructs an declarative configuration of the Status type for use with -// apply. -func Status() *StatusApplyConfiguration { - b := &StatusApplyConfiguration{} - b.WithKind("Status") - b.WithAPIVersion("meta.k8s.io/v1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithKind(value string) *StatusApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithAPIVersion(value string) *StatusApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithSelfLink(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithResourceVersion(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithContinue(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithRemainingItemCount(value int64) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.RemainingItemCount = &value - return b -} - -func (b *StatusApplyConfiguration) ensureListMetaApplyConfigurationExists() { - if b.ListMetaApplyConfiguration == nil { - b.ListMetaApplyConfiguration = &ListMetaApplyConfiguration{} - } -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithStatus(value string) *StatusApplyConfiguration { - b.Status = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithMessage(value string) *StatusApplyConfiguration { - b.Message = &value - return b -} - -// WithReason sets the Reason field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Reason field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithReason(value metav1.StatusReason) *StatusApplyConfiguration { - b.Reason = &value - return b -} - -// WithDetails sets the Details field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Details field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithDetails(value *StatusDetailsApplyConfiguration) *StatusApplyConfiguration { - b.Details = value - return b -} - -// WithCode sets the Code field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Code field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithCode(value int32) *StatusApplyConfiguration { - b.Code = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go deleted file mode 100644 index 7f05bca49..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusCauseApplyConfiguration represents an declarative configuration of the StatusCause type for use -// with apply. -type StatusCauseApplyConfiguration struct { - Type *v1.CauseType `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Field *string `json:"field,omitempty"` -} - -// StatusCauseApplyConfiguration constructs an declarative configuration of the StatusCause type for use with -// apply. -func StatusCause() *StatusCauseApplyConfiguration { - return &StatusCauseApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithType(value v1.CauseType) *StatusCauseApplyConfiguration { - b.Type = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithMessage(value string) *StatusCauseApplyConfiguration { - b.Message = &value - return b -} - -// WithField sets the Field field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Field field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithField(value string) *StatusCauseApplyConfiguration { - b.Field = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go deleted file mode 100644 index a7dbaa1b2..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - types "k8s.io/apimachinery/pkg/types" -) - -// StatusDetailsApplyConfiguration represents an declarative configuration of the StatusDetails type for use -// with apply. -type StatusDetailsApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Group *string `json:"group,omitempty"` - Kind *string `json:"kind,omitempty"` - UID *types.UID `json:"uid,omitempty"` - Causes []StatusCauseApplyConfiguration `json:"causes,omitempty"` - RetryAfterSeconds *int32 `json:"retryAfterSeconds,omitempty"` -} - -// StatusDetailsApplyConfiguration constructs an declarative configuration of the StatusDetails type for use with -// apply. -func StatusDetails() *StatusDetailsApplyConfiguration { - return &StatusDetailsApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithName(value string) *StatusDetailsApplyConfiguration { - b.Name = &value - return b -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithGroup(value string) *StatusDetailsApplyConfiguration { - b.Group = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithKind(value string) *StatusDetailsApplyConfiguration { - b.Kind = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithUID(value types.UID) *StatusDetailsApplyConfiguration { - b.UID = &value - return b -} - -// WithCauses adds the given value to the Causes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Causes field. 
-func (b *StatusDetailsApplyConfiguration) WithCauses(values ...*StatusCauseApplyConfiguration) *StatusDetailsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithCauses") - } - b.Causes = append(b.Causes, *values[i]) - } - return b -} - -// WithRetryAfterSeconds sets the RetryAfterSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RetryAfterSeconds field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithRetryAfterSeconds(value int32) *StatusDetailsApplyConfiguration { - b.RetryAfterSeconds = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go similarity index 66% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index c70906cfa..da6822111 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// IPAddress constructs an declarative configuration of the IPAddress type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. 
If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractIPAddressStatus is the same as ExtractIPAddress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractIPAddressStatus(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1alpha1.IPAddress"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(iPAddress.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { b.Kind = &value return b } @@ -91,7 +91,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +99,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +108,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +117,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +126,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +135,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +144,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +153,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +162,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +171,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +181,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +196,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +210,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +224,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +232,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +241,7 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { b.Spec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go new file mode 100644 index 000000000..064963d69 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. 
+func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go new file mode 100644 index 000000000..14b10b19f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// with apply. +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
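The new IPAddress, IPAddressSpec and ParentReference builders combine in the usual way for server-side apply; the remaining ParentReference setters (WithName, WithUID) follow just below. A minimal sketch, assuming the cluster-scoped IPAddress(name) constructor generated earlier in ipaddress.go and a clientset whose NetworkingV1alpha1 group serves IPAddresses; the pool label and the Service reference are purely illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingac "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyIPAddress declares an IPAddress whose parent is a Service and applies
// it server-side. IPAddress objects are cluster-scoped, so only a name is
// passed to the constructor (the name is the IP itself).
func applyIPAddress(ctx context.Context, cs kubernetes.Interface) error {
	ip := networkingac.IPAddress("192.168.1.5").
		WithLabels(map[string]string{"example.com/pool": "default"}).
		WithSpec(networkingac.IPAddressSpec().
			WithParentRef(networkingac.ParentReference().
				WithResource("services").
				WithNamespace("default").
				WithName("kubernetes")))
	_, err := cs.NetworkingV1alpha1().IPAddresses().
		Apply(ctx, ip, metav1.ApplyOptions{FieldManager: "ipaddress-example"})
	return err
}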
+func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go similarity index 76% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go index a2ad3adf1..bc6078aa9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" @@ -25,7 +25,7 @@ import ( // AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use // with apply. type AllocationResultApplyConfiguration struct { - ResourceHandle *string `json:"resourceHandle,omitempty"` + ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` Shareable *bool `json:"shareable,omitempty"` } @@ -36,11 +36,16 @@ func AllocationResult() *AllocationResultApplyConfiguration { return &AllocationResultApplyConfiguration{} } -// WithResourceHandle sets the ResourceHandle field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceHandle field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithResourceHandle(value string) *AllocationResultApplyConfiguration { - b.ResourceHandle = &value +// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceHandles field. +func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceHandles") + } + b.ResourceHandles = append(b.ResourceHandles, *values[i]) + } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..1dfb6ff97 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// with apply. +type PodSchedulingContextApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// apply. +func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { + b := &PodSchedulingContextApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. +// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +} + +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +} + +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { + b := &PodSchedulingContextApplyConfiguration{} + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(podSchedulingContext.Name) + b.WithNamespace(podSchedulingContext.Namespace) + + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
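ExtractPodSchedulingContext and the namespaced PodSchedulingContexts client support the extract/modify-in-place/apply workflow described in the comments above. A rough sketch, assuming a live object that was previously applied with the (illustrative) field manager "dra-scheduler":

package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

// reapplySelectedNode extracts only the fields owned by "dra-scheduler",
// changes the selected node and applies the result back. The manager name
// and node name are invented for the example.
func reapplySelectedNode(ctx context.Context, cs kubernetes.Interface, live *resourcev1alpha2.PodSchedulingContext) error {
	ac, err := resourceac.ExtractPodSchedulingContext(live, "dra-scheduler")
	if err != nil {
		return err
	}
	ac.WithSpec(resourceac.PodSchedulingContextSpec().WithSelectedNode("node-a"))
	_, err = cs.ResourceV1alpha2().
		PodSchedulingContexts(live.Namespace).
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "dra-scheduler", Force: true})
	return err
}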
+func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go similarity index 67% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go index 9fd3c1ee5..c95d3295e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go @@ -16,25 +16,25 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingSpecApplyConfiguration represents an declarative configuration of the PodSchedulingSpec type for use +// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use // with apply. -type PodSchedulingSpecApplyConfiguration struct { +type PodSchedulingContextSpecApplyConfiguration struct { SelectedNode *string `json:"selectedNode,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"` } -// PodSchedulingSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingSpec type for use with +// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with // apply. -func PodSchedulingSpec() *PodSchedulingSpecApplyConfiguration { - return &PodSchedulingSpecApplyConfiguration{} +func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { + return &PodSchedulingContextSpecApplyConfiguration{} } // WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { b.SelectedNode = &value return b } @@ -42,7 +42,7 @@ func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *Po // WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the PotentialNodes field. 
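The PodScheduling to PodSchedulingContext rename carries through to the spec and status builders that follow. A short sketch of both, assuming the usual generated setters on ResourceClaimSchedulingStatus; the node and claim names are invented:

package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

var (
	// Spec: where the pod should run and which nodes are still candidates.
	spec = resourceac.PodSchedulingContextSpec().
		WithSelectedNode("node-a").
		WithPotentialNodes("node-a", "node-b")

	// Status: per-claim scheduling information reported back by drivers.
	status = resourceac.PodSchedulingContextStatus().
		WithResourceClaims(resourceac.ResourceClaimSchedulingStatus().
			WithName("gpu-claim").
			WithUnsuitableNodes("node-b"))
)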
-func (b *PodSchedulingSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { for i := range values { b.PotentialNodes = append(b.PotentialNodes, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go index 5744f6c3e..a8b10b9a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingStatusApplyConfiguration represents an declarative configuration of the PodSchedulingStatus type for use +// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use // with apply. -type PodSchedulingStatusApplyConfiguration struct { +type PodSchedulingContextStatusApplyConfiguration struct { ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSchedulingStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingStatus type for use with +// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with // apply. -func PodSchedulingStatus() *PodSchedulingStatusApplyConfiguration { - return &PodSchedulingStatusApplyConfiguration{} +func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { + return &PodSchedulingContextStatusApplyConfiguration{} } // WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ResourceClaims field. -func (b *PodSchedulingStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingStatusApplyConfiguration { +func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithResourceClaims") diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go index f94811a9b..6c219f837 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. 
// Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -43,7 +43,7 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go index 477099cd7..41bb9e9a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. 
DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( types "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go index d7b25d75e..27820ede6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go index 35ff34aba..e74679aed 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go index d32619046..0c73e64e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" ) // ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use @@ -27,7 +27,7 @@ import ( type ResourceClaimSpecApplyConfiguration struct { ResourceClassName *string `json:"resourceClassName,omitempty"` ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha1.AllocationMode `json:"allocationMode,omitempty"` + AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with @@ -55,7 +55,7 @@ func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceC // WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocationMode field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha1.AllocationMode) *ResourceClaimSpecApplyConfiguration { +func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { b.AllocationMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go index e2283f8b0..c6fa61090 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go index e3c602cb6..fc2209b8f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
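Because the group version moves from resource.k8s.io/v1alpha1 to v1alpha2, callers also switch the k8s.io/api import they pass to WithAllocationMode. A sketch against the new paths; the class and claim names are placeholders:

package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

// claim builds a declarative ResourceClaim using the v1alpha2 AllocationMode
// constant; the equivalent code previously imported .../resource/v1alpha1.
var claim = resourceac.ResourceClaim("gpu-claim", "default").
	WithSpec(resourceac.ResourceClaimSpec().
		WithResourceClassName("example.com-gpu").
		WithAllocationMode(resourcev1alpha2.AllocationModeWaitForFirstConsumer))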
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go index 88058e066..2f38ea036 100644 --- 
a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go index 5f980acdb..724c9e88e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -44,7 +44,7 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { b := &ResourceClassApplyConfiguration{} b.WithName(name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -59,27 +59,27 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "") } // ExtractResourceClassStatus is the same as ExtractResourceClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClassStatus(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "status") } -func extractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { +func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClass"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(resourceClass.Name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go index b03a9a6da..d67e4d397 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go new file mode 100644 index 000000000..028cbaa1a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use +// with apply. 
+type ResourceHandleApplyConfiguration struct { + DriverName *string `json:"driverName,omitempty"` + Data *string `json:"data,omitempty"` +} + +// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with +// apply. +func ResourceHandle() *ResourceHandleApplyConfiguration { + return &ResourceHandleApplyConfiguration{} +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { + b.DriverName = &value + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { + b.Data = &value + return b +} diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go index 758b0a3ac..7470259dc 100644 --- a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go +++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go @@ -92,12 +92,18 @@ func convertAPIGroup(g apidiscovery.APIGroupDiscovery) ( resourceList := &metav1.APIResourceList{} resourceList.GroupVersion = gv.String() for _, r := range v.Resources { - resource := convertAPIResource(r) - resourceList.APIResources = append(resourceList.APIResources, resource) + resource, err := convertAPIResource(r) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, resource) + } // Subresources field in new format get transformed into full APIResources. + // It is possible a partial result with an error was returned to be used + // as the parent resource for the subresource. for _, subresource := range r.Subresources { - sr := convertAPISubresource(resource, subresource) - resourceList.APIResources = append(resourceList.APIResources, sr) + sr, err := convertAPISubresource(resource, subresource) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, sr) + } } } gvResources[gv] = resourceList @@ -105,30 +111,44 @@ func convertAPIGroup(g apidiscovery.APIGroupDiscovery) ( return group, gvResources, failedGVs } -// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. -func convertAPIResource(in apidiscovery.APIResourceDiscovery) metav1.APIResource { - return metav1.APIResource{ +// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. We are +// resilient to missing GVK, since this resource might be the parent resource +// for a subresource. If the parent is missing a GVK, it is not returned in +// discovery, and the subresource MUST have the GVK. 
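AllocationResult now carries a list of ResourceHandles instead of the single resourceHandle string, one entry per driver. A sketch using the builders above; the driver name and opaque data payload are invented:

package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

// allocation records a (hypothetical) driver allocation using the repeated
// ResourceHandles field that replaces the removed ResourceHandle string.
var allocation = resourceac.AllocationResult().
	WithResourceHandles(
		resourceac.ResourceHandle().
			WithDriverName("gpu.example.com").
			WithData(`{"device":"gpu-0"}`),
	).
	WithShareable(true)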
+func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{ Name: in.Resource, SingularName: in.SingularResource, Namespaced: in.Scope == apidiscovery.ScopeNamespace, - Group: in.ResponseKind.Group, - Version: in.ResponseKind.Version, - Kind: in.ResponseKind.Kind, Verbs: in.Verbs, ShortNames: in.ShortNames, Categories: in.Categories, } + var err error + if in.ResponseKind != nil { + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + } else { + err = fmt.Errorf("discovery resource %s missing GVK", in.Resource) + } + // Can return partial result with error, which can be the parent for a + // subresource. Do not add this result to the returned discovery resources. + return result, err } // convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource. -func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) metav1.APIResource { - return metav1.APIResource{ - Name: fmt.Sprintf("%s/%s", parent.Name, in.Subresource), - SingularName: parent.SingularName, - Namespaced: parent.Namespaced, - Group: in.ResponseKind.Group, - Version: in.ResponseKind.Version, - Kind: in.ResponseKind.Kind, - Verbs: in.Verbs, +func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{} + if in.ResponseKind == nil { + return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) } + result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) + result.SingularName = parent.SingularName + result.Namespaced = parent.Namespaced + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + result.Verbs = in.Verbs + return result, nil } diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 641568008..1253fa1f4 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "mime" "net/http" "net/url" "sort" @@ -58,8 +59,9 @@ const ( defaultBurst = 300 AcceptV1 = runtime.ContentTypeJSON - // Aggregated discovery content-type (currently v2beta1). NOTE: Currently, we are assuming the order - // for "g", "v", and "as" from the server. We can only compare this string if we can make that assumption. + // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters + // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient + // to ordering when comparing returned values in "Content-Type" header). AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" // Prioritize aggregated discovery by placing first in the order of discovery accept types. acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 @@ -259,8 +261,16 @@ func (d *DiscoveryClient) downloadLegacy() ( var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Switch on content-type server responded with: aggregated or unaggregated. 
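With the tolerant conversion above, a resource missing its GVK is skipped and the failing group-versions are recorded rather than aborting the whole download; callers of the discovery client still see such failures aggregated into an error they can choose to tolerate. A sketch of consuming partial results, assuming the existing IsGroupDiscoveryFailedError helper:

package example

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

func listResources(cfg *rest.Config) error {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return err
	}
	// ServerGroupsAndResources can return partial results together with an
	// aggregated error describing the group-versions that failed.
	_, resources, err := dc.ServerGroupsAndResources()
	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
		return err // a real transport or decoding failure
	}
	_ = resources // partial results are still usable here
	return nil
}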
- switch responseContentType { - case AcceptV1: + switch { + case isV2Beta1ContentType(responseContentType): + var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + default: + // Default is unaggregated discovery v1. var v metav1.APIVersions err = json.Unmarshal(body, &v) if err != nil { @@ -271,15 +281,6 @@ func (d *DiscoveryClient) downloadLegacy() ( apiGroup = apiVersionsToAPIGroup(&v) } apiGroupList.Groups = []metav1.APIGroup{apiGroup} - case AcceptV2Beta1: - var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList - err = json.Unmarshal(body, &aggregatedDiscovery) - if err != nil { - return nil, nil, nil, err - } - apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: - return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType) } return apiGroupList, resourcesByGV, failedGVs, nil @@ -313,13 +314,8 @@ func (d *DiscoveryClient) downloadAPIs() ( failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Switch on content-type server responded with: aggregated or unaggregated. - switch responseContentType { - case AcceptV1: - err = json.Unmarshal(body, apiGroupList) - if err != nil { - return nil, nil, nil, err - } - case AcceptV2Beta1: + switch { + case isV2Beta1ContentType(responseContentType): var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { @@ -327,12 +323,38 @@ func (d *DiscoveryClient) downloadAPIs() ( } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) default: - return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType) + // Default is unaggregated discovery v1. + err = json.Unmarshal(body, apiGroupList) + if err != nil { + return nil, nil, nil, err + } } return apiGroupList, resourcesByGV, failedGVs, nil } +// isV2Beta1ContentType checks of the content-type string is both +// "application/json" and contains the v2beta1 content-type params. +// NOTE: This function is resilient to the ordering of the +// content-type parameters, as well as parameters added by +// intermediaries such as proxies or gateways. Examples: +// +// "application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true +// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true +// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true +// "application/json" = false +// "application/json; charset=UTF-8" = false +func isV2Beta1ContentType(contentType string) bool { + base, params, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return runtime.ContentTypeJSON == base && + params["g"] == "apidiscovery.k8s.io" && + params["v"] == "v2beta1" && + params["as"] == "APIGroupDiscoveryList" +} + // ServerGroups returns the supported groups, with information like supported versions and the // preferred version. 
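The new isV2Beta1ContentType helper parses the Content-Type header instead of comparing strings, which is what makes parameter ordering and extra parameters such as charset irrelevant. A standalone sketch of the same check:

package main

import (
	"fmt"
	"mime"
)

// isAggregatedDiscovery mirrors the matching done by isV2Beta1ContentType:
// parse the media type, then compare the base type and the three parameters
// individually so ordering and added params (e.g. charset) do not matter.
func isAggregatedDiscovery(contentType string) bool {
	base, params, err := mime.ParseMediaType(contentType)
	if err != nil {
		return false
	}
	return base == "application/json" &&
		params["g"] == "apidiscovery.k8s.io" &&
		params["v"] == "v2beta1" &&
		params["as"] == "APIGroupDiscoveryList"
}

func main() {
	fmt.Println(isAggregatedDiscovery("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8")) // true
	fmt.Println(isAggregatedDiscovery("application/json; charset=UTF-8"))                                                          // false
}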
func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 9eecbb2a8..6345f2fb6 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -42,6 +42,7 @@ import ( batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" @@ -66,7 +67,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -99,6 +100,7 @@ type Interface interface { BatchV1beta1() batchv1beta1.BatchV1beta1Interface CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -122,7 +124,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface + ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -154,6 +156,7 @@ type Clientset struct { batchV1beta1 *batchv1beta1.BatchV1beta1Client certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -177,7 +180,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha1 *resourcev1alpha1.ResourceV1alpha1Client + resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -286,6 +289,11 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 +} + 
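With the clientset wiring above, certificates.k8s.io/v1alpha1 becomes reachable and existing callers of ResourceV1alpha1 move to ResourceV1alpha2. A sketch of both call sites, assuming the new certificates client exposes ClusterTrustBundles:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func listNewAPIs(ctx context.Context, cs kubernetes.Interface) error {
	// New in this clientset: certificates.k8s.io/v1alpha1.
	if _, err := cs.CertificatesV1alpha1().ClusterTrustBundles().List(ctx, metav1.ListOptions{}); err != nil {
		return err
	}
	// Renamed: callers of cs.ResourceV1alpha1() must switch to ResourceV1alpha2().
	_, err := cs.ResourceV1alpha2().ResourceClasses().List(ctx, metav1.ListOptions{})
	return err
}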
// CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -401,9 +409,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha1 retrieves the ResourceV1alpha1Client -func (c *Clientset) ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface { - return c.resourceV1alpha1 +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return c.resourceV1alpha2 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -560,6 +568,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -652,7 +664,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha1, err = resourcev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -721,6 +733,7 @@ func New(c rest.Interface) *Clientset { cs.batchV1beta1 = batchv1beta1.New(c) cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -744,7 +757,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha1 = resourcev1alpha1.New(c) + cs.resourceV1alpha2 = resourcev1alpha2.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index b272334ad..9cef4242f 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. 
package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index e43780529..64d3ce2a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -38,6 +38,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -62,7 +63,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -100,6 +101,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv1beta1.AddToScheme, certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, + certificatesv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -123,7 +125,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ba827f3c9..1d994b5ab 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -43,6 +43,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) @@ -50,6 +51,7 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy 
*admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } @@ -132,6 +134,21 @@ func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmi return } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + // Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -195,3 +212,32 @@ func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmis Into(result) return } + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 218cb60c3..7823729e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1beta1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1beta1Client struct { restClient rest.Interface } +func (c *AuthenticationV1beta1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 60bf15ab9..527a458d7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1beta1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go new file mode 100644 index 000000000..9d54826a3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
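Reviewer note on the validatingadmissionpolicy.go hunks above: UpdateStatus and ApplyStatus are now generated because ValidatingAdmissionPolicy carries a Status member in 1.27. A hedged sketch of the simpler of the two (bumpObservedGeneration and its wiring are illustrative only):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpObservedGeneration is a hypothetical helper exercising the newly
// generated UpdateStatus on the admissionregistration/v1alpha1 typed client.
func bumpObservedGeneration(ctx context.Context, cs kubernetes.Interface, name string) error {
	policy, err := cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	policy.Status.ObservedGeneration = policy.Generation
	_, err = cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().UpdateStatus(ctx, policy, metav1.UpdateOptions{})
	return err
}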
+type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + result = &v1beta1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 000000000..a9050af94 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTrustBundlesGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInterface { + return newClusterTrustBundles(c) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
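Reviewer note before the new certificates/v1alpha1 client continues below: the SelfSubjectReviews client added to AuthenticationV1beta1 above is create-only, and the server fills in the caller's identity. An illustrative sketch (the whoAmI name is not from this diff; the v1beta1 API must be served by the target cluster):

package example

import (
	"context"

	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// whoAmI creates an empty SelfSubjectReview and reads back the identity the
// API server attributes to the request.
func whoAmI(ctx context.Context, cs kubernetes.Interface) (string, error) {
	review, err := cs.AuthenticationV1beta1().SelfSubjectReviews().Create(ctx,
		&authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		return "", err
	}
	return review.Status.UserInfo.Username, nil
}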
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..970fb15e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. +// A group's client should implement this interface. +type ClusterTrustBundlesGetter interface { + ClusterTrustBundles() ClusterTrustBundleInterface +} + +// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. 
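Reviewer note on the new CertificatesV1alpha1 client above: ClusterTrustBundle is an alpha, cluster-scoped API in 1.27 and is only served when the corresponding feature/API is enabled. A hedged usage sketch, building the group client directly (cs.CertificatesV1alpha1() from the aggregated clientset works equally well; listTrustBundles is illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certificatesv1alpha1client "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1"
	"k8s.io/client-go/rest"
)

// listTrustBundles lists ClusterTrustBundles and prints each bundle's signer.
func listTrustBundles(ctx context.Context, cfg *rest.Config) error {
	certs, err := certificatesv1alpha1client.NewForConfig(cfg)
	if err != nil {
		return err
	}
	bundles, err := certs.ClusterTrustBundles().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, b := range bundles.Items {
		fmt.Printf("%s (signer %q)\n", b.Name, b.Spec.SignerName)
	}
	return nil
}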
+type ClusterTrustBundleInterface interface { + Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + ClusterTrustBundleExpansion +} + +// clusterTrustBundles implements ClusterTrustBundleInterface +type clusterTrustBundles struct { + client rest.Interface +} + +// newClusterTrustBundles returns a ClusterTrustBundles +func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { + return &clusterTrustBundles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. +func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Get(). + Resource("clustertrustbundles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. +func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterTrustBundleList{} + err = c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTrustBundles. +func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Post(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Put(). + Resource("clustertrustbundles"). + Name(clusterTrustBundle.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. +func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertrustbundles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertrustbundles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterTrustBundle. +func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(pt). + Resource("clustertrustbundles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. +func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + if clusterTrustBundle == nil { + return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterTrustBundle) + if err != nil { + return nil, err + } + name := clusterTrustBundle.Name + if name == nil { + return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") + } + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustertrustbundles"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go similarity index 50% rename from vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go index d5af0d598..43cc534b3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package admission +// Code generated by client-gen. DO NOT EDIT. -// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. -type DecoderInjector interface { - InjectDecoder(*Decoder) error -} +package v1alpha1 -// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. -func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { - if s, ok := i.(DecoderInjector); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} +type ClusterTrustBundleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 464fff911..562f8d5e4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,8 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// -// have been created with the "" namespace. +// have been created with the "" namespace. 
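Reviewer note: the rename hunk above that turns controller-runtime's webhook/admission/inject.go into the certificates generated_expansion.go also removes the DecoderInjector/InjectDecoderInto helpers, consistent with controller-runtime 0.15 dropping its dependency-injection interfaces. Webhook handlers now hold their own decoder; a hedged sketch (podAnnotator and its wiring are illustrative, and the decoder would typically be built once from the manager's scheme, e.g. via admission.NewDecoder, whose exact signature should be checked against the pinned controller-runtime version):

package example

import (
	"context"
	"net/http"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podAnnotator is a hypothetical handler; instead of implementing the removed
// InjectDecoder, it is constructed with the decoder it needs.
type podAnnotator struct {
	decoder *admission.Decoder
}

func (a *podAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response {
	pod := &corev1.Pod{}
	if err := a.decoder.Decode(req, pod); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	return admission.Allowed("")
}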
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514df..4725d2cd1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f041..67fcf4992 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c3133..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index ab41abb7d..9c2979d6c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. 
package v1alpha1 type ClusterCIDRExpansion interface{} + +type IPAddressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..fff193d68 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + client rest.Interface +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { + return &iPAddresses{ + client: c.RESTClient(), + } +} + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Get(). + Resource("ipaddresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. 
+func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IPAddressList{} + err = c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. +func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Post(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Put(). + Resource("ipaddresses"). + Name(iPAddress.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ipaddresses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ipaddresses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Patch(pt). + Resource("ipaddresses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. 
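Reviewer note on the new IPAddress client in this file: IPAddress objects are cluster-scoped, alpha in 1.27, and only served when the networking.k8s.io/v1alpha1 API is enabled on the cluster. An illustrative sketch (dumpIPAddresses is not from this diff):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// dumpIPAddresses lists IPAddress objects via the new getter on
// NetworkingV1alpha1 and prints their names.
func dumpIPAddresses(ctx context.Context, cs kubernetes.Interface) error {
	addrs, err := cs.NetworkingV1alpha1().IPAddresses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, a := range addrs.Items {
		fmt.Println(a.Name)
	}
	return nil
}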
+func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + result = &v1alpha1.IPAddress{} + err = c.client.Patch(types.ApplyPatchType). + Resource("ipaddresses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index ccb593316..884c846f5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -29,6 +29,7 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface ClusterCIDRsGetter + IPAddressesGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -40,6 +41,10 @@ func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { return newClusterCIDRs(c) } +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + // NewForConfig creates a new NetworkingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go deleted file mode 100644 index e163a8456..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/resource/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingsGetter has a method to return a PodSchedulingInterface. -// A group's client should implement this interface. -type PodSchedulingsGetter interface { - PodSchedulings(namespace string) PodSchedulingInterface -} - -// PodSchedulingInterface has methods to work with PodScheduling resources. 
-type PodSchedulingInterface interface { - Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (*v1alpha1.PodScheduling, error) - Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodScheduling, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodSchedulingList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) - Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - PodSchedulingExpansion -} - -// podSchedulings implements PodSchedulingInterface -type podSchedulings struct { - client rest.Interface - ns string -} - -// newPodSchedulings returns a PodSchedulings -func newPodSchedulings(c *ResourceV1alpha1Client, namespace string) *podSchedulings { - return &podSchedulings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. -func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulings that match those selectors. -func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodSchedulingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodSchedulingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulings. -func (c *podSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any. 
-func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podScheduling and deletes it. Returns an error if one occurs. -func (c *podSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podScheduling. -func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling. 
-func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go new file mode 100644 index 000000000..baaf2d985 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go index df88c2f93..2c02e9ce7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 -type PodSchedulingExpansion interface{} +type PodSchedulingContextExpansion interface{} type ResourceClaimExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 000000000..72e81a29e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + client rest.Interface + ns string +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. +func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. +func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.PodSchedulingContextList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSchedulingContexts. +func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. +func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podSchedulingContext. +func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
+func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go similarity index 66% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go index 2355bf7cc..d5795fd62 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go @@ -16,49 +16,49 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "net/http" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha1Interface interface { +type ResourceV1alpha2Interface interface { RESTClient() rest.Interface - PodSchedulingsGetter + PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter ResourceClassesGetter } -// ResourceV1alpha1Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha1Client struct { +// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. 
+type ResourceV1alpha2Client struct { restClient rest.Interface } -func (c *ResourceV1alpha1Client) PodSchedulings(namespace string) PodSchedulingInterface { - return newPodSchedulings(c, namespace) +func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaims(namespace string) ResourceClaimInterface { +func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClasses() ResourceClassInterface { +func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { return newResourceClasses(c) } -// NewForConfig creates a new ResourceV1alpha1Client for the given config. +// NewForConfig creates a new ResourceV1alpha2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -70,9 +70,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -81,12 +81,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Cli if err != nil { return nil, err } - return &ResourceV1alpha1Client{client}, nil + return &ResourceV1alpha2Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha1Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -94,13 +94,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { return client } -// New creates a new ResourceV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha1Client { - return &ResourceV1alpha1Client{c} +// New creates a new ResourceV1alpha2Client for the given RESTClient. 
+func New(c rest.Interface) *ResourceV1alpha2Client { + return &ResourceV1alpha2Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1alpha2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -114,7 +114,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha1Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go similarity index 79% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go index cd2d0c782..cfb27c9db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type ResourceClaimsGetter interface { // ResourceClaimInterface has methods to work with ResourceClaim resources. 
type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (*v1alpha1.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) + Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) ResourceClaimExpansion } @@ -62,7 +62,7 @@ type resourceClaims struct { } // newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceClaims { +func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { return &resourceClaims{ client: c.RESTClient(), ns: namespace, @@ -70,8 +70,8 @@ func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceCla } // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). 
@@ -83,12 +83,12 @@ func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimList, err error) { +func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimList{} + result = &v1alpha2.ResourceClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). @@ -115,8 +115,8 @@ func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaims"). @@ -128,8 +128,8 @@ func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.Res } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -143,8 +143,8 @@ func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.Res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -185,8 +185,8 @@ func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOpt } // Patch applies the patch and returns the patched resourceClaim. 
-func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaims"). @@ -200,7 +200,7 @@ func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchT } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -213,7 +213,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). @@ -227,7 +227,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -242,7 +242,7 @@ func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourc return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go index b6cc3d96e..3f4e32006 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClaimTemplatesGetter interface { // ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha1.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha1.ResourceClaimTemplate, error) + Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimTemplateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) ResourceClaimTemplateExpansion } @@ -60,7 +60,7 @@ type resourceClaimTemplates struct { } // newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *resourceClaimTemplates { +func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { return &resourceClaimTemplates{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *res } // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. 
-func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -81,12 +81,12 @@ func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimTemplateList, err error) { +func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimTemplateList{} + result = &v1alpha2.ResourceClaimTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -113,8 +113,8 @@ func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -126,8 +126,8 @@ func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTempla } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -167,8 +167,8 @@ func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.D } // Patch applies the patch and returns the patched resourceClaimTemplate. 
-func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -182,7 +182,7 @@ func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt type } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -195,7 +195,7 @@ func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplat if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaimTemplate{} + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaimtemplates"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go index 9c8b45463..95a4ac566 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClassesGetter interface { // ResourceClassInterface has methods to work with ResourceClass resources. 
type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (*v1alpha1.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (*v1alpha1.ResourceClass, error) + Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) + Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) + Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) ResourceClassExpansion } @@ -59,15 +59,15 @@ type resourceClasses struct { } // newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha1Client) *resourceClasses { +func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { return &resourceClasses{ client: c.RESTClient(), } } // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Get(). Resource("resourceclasses"). Name(name). @@ -78,12 +78,12 @@ func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOp } // List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClassList, err error) { +func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClassList{} + result = &v1alpha2.ResourceClassList{} err = c.client.Get(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). 
@@ -108,8 +108,8 @@ func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Post(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). @@ -120,8 +120,8 @@ func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.Re } // Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Put(). Resource("resourceclasses"). Name(resourceClass.Name). @@ -158,8 +158,8 @@ func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOp } // Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Patch(pt). Resource("resourceclasses"). Name(name). @@ -172,7 +172,7 @@ func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.Patch } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { if resourceClass == nil { return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") } @@ -185,7 +185,7 @@ func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1al if name == nil { return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") } - result = &v1alpha1.ResourceClass{} + result = &v1alpha2.ResourceClass{} err = c.client.Patch(types.ApplyPatchType). Resource("resourceclasses"). Name(*name). 
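The hunks above complete the rename of the resource.k8s.io typed client from v1alpha1 to v1alpha2: PodSchedulings becomes PodSchedulingContexts, while ResourceClaims, ResourceClaimTemplates and ResourceClasses keep their names but move to the ResourceV1alpha2 client. Below is a minimal consumer-side sketch of what calls look like after this bump. It is not part of the diff; it assumes a kubeconfig at the default location, the "default" namespace, and a cluster that actually serves resource.k8s.io/v1alpha2 (the DynamicResourceAllocation alpha API).

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build client configuration from the local kubeconfig (illustrative only;
	// an in-cluster config would work the same way).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()

	// Formerly cs.ResourceV1alpha1().PodSchedulings("default"); both the group
	// version and the resource were renamed in the 0.27 client libraries.
	pscs, err := cs.ResourceV1alpha2().PodSchedulingContexts("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, psc := range pscs.Items {
		fmt.Println(psc.Name, psc.Spec.SelectedNode)
	}

	// The other resource.k8s.io types keep their names but are now served
	// through the v1alpha2 typed client.
	claims, err := cs.ResourceV1alpha2().ResourceClaims("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("resource claims:", len(claims.Items))
}

Within this vendor tree the v1alpha1 typed client is removed outright (the files above are renames), so any in-repo callers have to move to the v1alpha2 names as part of this bump.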
diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 000000000..e61009424 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/openapi/client.go b/vendor/k8s.io/client-go/openapi/client.go index 7b58762ac..6a4305718 100644 --- a/vendor/k8s.io/client-go/openapi/client.go +++ b/vendor/k8s.io/client-go/openapi/client.go @@ -19,6 +19,7 @@ package openapi import ( "context" "encoding/json" + "strings" "k8s.io/client-go/rest" "k8s.io/kube-openapi/pkg/handler3" @@ -58,7 +59,11 @@ func (c *client) Paths() (map[string]GroupVersion, error) { // Create GroupVersions for each element of the result result := map[string]GroupVersion{} for k, v := range discoMap.Paths { - result[k] = newGroupVersion(c, v) + // If the server returned a URL rooted at /openapi/v3, preserve any additional client-side prefix. + // If the server returned a URL not rooted at /openapi/v3, treat it as an actual server-relative URL. + // See https://github.com/kubernetes/kubernetes/issues/117463 for details + useClientPrefix := strings.HasPrefix(v.ServerRelativeURL, "/openapi/v3") + result[k] = newGroupVersion(c, v, useClientPrefix) } return result, nil } diff --git a/vendor/k8s.io/client-go/openapi/groupversion.go b/vendor/k8s.io/client-go/openapi/groupversion.go index 32133a29b..601dcbe3c 100644 --- a/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/vendor/k8s.io/client-go/openapi/groupversion.go @@ -18,6 +18,7 @@ package openapi import ( "context" + "net/url" "k8s.io/kube-openapi/pkg/handler3" ) @@ -29,18 +30,41 @@ type GroupVersion interface { } type groupversion struct { - client *client - item handler3.OpenAPIV3DiscoveryGroupVersion + client *client + item handler3.OpenAPIV3DiscoveryGroupVersion + useClientPrefix bool } -func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion) *groupversion { - return &groupversion{client: client, item: item} +func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion, useClientPrefix bool) *groupversion { + return &groupversion{client: client, item: item, useClientPrefix: useClientPrefix} } func (g *groupversion) Schema(contentType string) ([]byte, error) { - return g.client.restClient.Get(). - RequestURI(g.item.ServerRelativeURL). - SetHeader("Accept", contentType). - Do(context.TODO()). - Raw() + if !g.useClientPrefix { + return g.client.restClient.Get(). + RequestURI(g.item.ServerRelativeURL). + SetHeader("Accept", contentType). + Do(context.TODO()). + Raw() + } + + locator, err := url.Parse(g.item.ServerRelativeURL) + if err != nil { + return nil, err + } + + path := g.client.restClient.Get(). + AbsPath(locator.Path). + SetHeader("Accept", contentType) + + // Other than root endpoints(openapiv3/apis), resources have hash query parameter to support etags. 
+ // However, absPath does not support handling query parameters internally, + // so that hash query parameter is added manually + for k, value := range locator.Query() { + for _, v := range value { + path.Param(k, v) + } + } + + return path.Do(context.TODO()).Raw() } diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 51e34dda3..676d51d32 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -43,7 +43,8 @@ var ( gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning + // https://kubernetes.io/releases/version-skew-policy/ // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 2cf821bcd..60df7e568 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -52,8 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 96e725692..bb6fb4dec 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -481,7 +481,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// URL returns the current working URL. +// Error returns any error encountered constructing the request, if any. +func (r *Request) Error() error { + return r.err +} + +// URL returns the current working URL. Check the result of Error() to ensure +// that the returned URL is valid. func (r *Request) URL() *url.URL { p := r.pathPrefix if r.namespaceSet && len(r.namespace) > 0 { @@ -726,7 +732,6 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { return r.newStreamWatcher(resp) @@ -786,22 +791,36 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) ), nil } -// updateURLMetrics is a convenience function for pushing metrics. -// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, err error) { - url := "none" +// updateRequestResultMetric increments the RequestResult metric counter, +// it should be called with the (response, err) tuple from the final +// reply from the server. 
+func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestResult.Increment(ctx, code, req.verb, host) +} + +// updateRequestRetryMetric increments the RequestRetry metric counter, +// it should be called with the (response, err) tuple for each retry +// except for the final attempt. +func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host) +} + +func sanitize(req *Request, resp *http.Response, err error) (string, string) { + host := "none" if req.c.base != nil { - url = req.c.base.Host + host = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric // system so we just report them as `<error>`. - if err != nil { - metrics.RequestResult.Increment(ctx, "<error>", req.verb, url) - } else { - // Metrics for failure codes - metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url) + code := "<error>" + if resp != nil { + code = strconv.Itoa(resp.StatusCode) } + + return code, host } // Stream formats and executes the request, and offers streaming of the response. @@ -834,7 +853,6 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { return nil, err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err != nil { // we only retry on an HTTP response with 'Retry-After' header @@ -979,7 +997,6 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) // The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown. // https://pkg.go.dev/net/http#Request if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index 207060a5c..eaaadc6a4 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ b/vendor/k8s.io/client-go/rest/with_retry.go @@ -242,8 +242,20 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp // parameters calculated from the (response, err) tuple from // attempt N-1, so r.retryAfter is outdated and should not be // referred to here. + isRetry := r.retryAfter != nil r.retryAfter = nil + // the client finishes a single request after N attempts (1..N) + // - all attempts (1..N) are counted to the rest_client_requests_total + // metric (current behavior). + // - every attempt after the first (2..N) are counted to the + // rest_client_request_retries_total metric.
+ updateRequestResultMetric(ctx, request, resp, err) + if isRetry { + // this is attempt 2 or later + updateRequestRetryMetric(ctx, request, resp, err) + } + if request.c.base != nil { if err != nil { request.backoff.UpdateBackoff(request.URL(), err, 0) @@ -346,8 +358,12 @@ func retryAfterResponse() *http.Response { } func retryAfterResponseWithDelay(delay string) *http.Response { + return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay) +} + +func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response { return &http.Response{ - StatusCode: http.StatusInternalServerError, + StatusCode: code, Header: http.Header{"Retry-After": []string{delay}}, } } diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 0762da3be..f437f2861 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -18,6 +18,7 @@ package cache import ( "errors" + "os" "sync" "time" @@ -50,11 +51,12 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration @@ -84,7 +86,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -131,18 +133,24 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } + if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { + r.UseWatchList = true + } c.reflectorMutex.Lock() c.reflector = r @@ -211,7 +219,7 @@ func (c *controller) processLoop() { // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -220,6 +228,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. 
type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -227,7 +238,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -247,6 +258,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an @@ -258,11 +299,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -273,7 +314,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -353,17 +394,6 @@ func NewIndexerInformer( return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) } -// TransformFunc allows for transforming an object before it will be processed -// and put into the controller cache and before the corresponding handlers will -// be called on it. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown -// -// The most common usage pattern is to clean-up some parts of the object to -// reduce component memory usage if a given component doesn't care about them. -// given controller doesn't care for them -type TransformFunc func(interface{}) (interface{}, error) - // NewTransformingInformer returns a Store and a controller for populating // the store while also providing event notifications. 
You should only used // the returned Store for Get/List operations; Add/Modify/Deletes will cause @@ -411,19 +441,12 @@ func processDeltas( // Object which receives event notifications from the given deltas handler ResourceEventHandler, clientState Store, - transformer TransformFunc, deltas Deltas, + isInInitialList bool, ) error { // from oldest to newest for _, d := range deltas { obj := d.Object - if transformer != nil { - var err error - obj, err = transformer(obj) - if err != nil { - return err - } - } switch d.Type { case Sync, Replaced, Added, Updated: @@ -436,7 +459,7 @@ func processDeltas( if err := clientState.Add(obj); err != nil { return err } - handler.OnAdd(obj) + handler.OnAdd(obj, isInInitialList) } case Deleted: if err := clientState.Delete(obj); err != nil { @@ -475,6 +498,7 @@ func newInformer( fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, + Transformer: transformer, }) cfg := &Config{ @@ -484,9 +508,9 @@ func newInformer( FullResyncPeriod: resyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, transformer, deltas) + return processDeltas(h, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 0c13a41f0..7160bb1ee 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -51,6 +51,10 @@ type DeltaFIFOOptions struct { // When true, `Replaced` events will be sent for items passed to a Replace() call. // When false, `Sync` events will be sent instead. EmitDeltaTypeReplaced bool + + // If set, will be called for objects before enqueueing them. Please + // see the comment on TransformFunc for details. + Transformer TransformFunc } // DeltaFIFO is like FIFO, but differs in two ways. One is that the @@ -129,8 +133,32 @@ type DeltaFIFO struct { // emitDeltaTypeReplaced is whether to emit the Replaced or Sync // DeltaType when Replace() is called (to preserve backwards compat). emitDeltaTypeReplaced bool + + // Called with every object if non-nil. + transformer TransformFunc } +// TransformFunc allows for transforming an object before it will be processed. +// TransformFunc (similarly to ResourceEventHandler functions) should be able +// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. +// +// New in v1.27: In such cases, the contained object will already have gone +// through the transform object separately (when it was added / updated prior +// to the delete), so the TransformFunc can likely safely ignore such objects +// (i.e., just return the input object). +// +// The most common usage pattern is to clean-up some parts of the object to +// reduce component memory usage if a given component doesn't care about them. +// +// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc +// sees the object before any other actor, and it is now safe to mutate the +// object in place instead of making a copy. +// +// Note that TransformFunc is called while inserting objects into the +// notification queue and is therefore extremely performance sensitive; please +// do not do anything that will take a long time. 
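// A sketch of the usage pattern the TransformFunc documentation above describes: drop
// ObjectMeta.ManagedFields to shrink the cache, pass tombstones through untouched, and
// install the function with SharedInformer.SetTransform before the informer is started.
// The same function could be handed to DeltaFIFOOptions.Transformer when building a
// DeltaFIFO directly. Function names here are illustrative assumptions.
package main

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

func stripManagedFields(obj interface{}) (interface{}, error) {
	// Tombstones wrap an object that already went through the transform once;
	// per the documentation above it is fine to return them unchanged.
	if _, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return obj, nil
	}
	if accessor, err := meta.Accessor(obj); err == nil {
		// Since v1.27 the transform sees the object before any other actor,
		// so mutating in place is allowed here.
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}

func installTransform(informer cache.SharedIndexInformer) error {
	// Must be set before the informer is started.
	return informer.SetTransform(stripManagedFields)
}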
+type TransformFunc func(interface{}) (interface{}, error) + // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string @@ -227,6 +255,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { knownObjects: opts.KnownObjects, emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + transformer: opts.Transformer, } f.cond.L = &f.lock return f @@ -271,6 +300,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -411,6 +444,21 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err if err != nil { return KeyError{obj, err} } + + // Every object comes through this code path once, so this is a good + // place to call the transform func. If obj is a + // DeletedFinalStateUnknown tombstone, then the containted inner object + // will already have gone through the transformer, but we document that + // this can happen. In cases involving Replace(), such an object can + // come through multiple times. + if f.transformer != nil { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } + } + oldDeltas := f.items[id] newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -526,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -551,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err @@ -566,12 +615,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // using the Sync or Replace DeltaType and then (2) it does some deletions. // In particular: for every pre-existing key K that is not the key of // an object in `list` there is the effect of -// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object -// of K. If `f.knownObjects == nil` then the pre-existing keys are -// those in `f.items` and the current object of K is the `.Newest()` -// of the Deltas associated with K. Otherwise the pre-existing keys -// are those listed by `f.knownObjects` and the current object of K is -// what `f.knownObjects.GetByKey(K)` returns. +// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known +// object of K. The pre-existing keys are those in the union set of the keys in +// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is +// the one present in the last delta in `f.items`. If there is no delta for K +// in `f.items`, it is the object in `f.knownObjects` func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { f.lock.Lock() defer f.lock.Unlock() @@ -595,51 +643,23 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { } } - if f.knownObjects == nil { - // Do deletion detection against our own list. - queuedDeletions := 0 - for k, oldItem := range f.items { - if keys.Has(k) { - continue - } - // Delete pre-existing items not in the new list. 
- // This could happen if watch deletion event was missed while - // disconnected from apiserver. - var deletedObj interface{} - if n := oldItem.Newest(); n != nil { - deletedObj = n.Object - } - queuedDeletions++ - if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { - return err - } - } - - if !f.populated { - f.populated = true - // While there shouldn't be any queued deletions in the initial - // population of the queue, it's better to be on the safe side. - f.initialPopulationCount = keys.Len() + queuedDeletions - } - - return nil - } - - // Detect deletions not already in the queue. - knownKeys := f.knownObjects.ListKeys() + // Do deletion detection against objects in the queue queuedDeletions := 0 - for _, k := range knownKeys { + for k, oldItem := range f.items { if keys.Has(k) { continue } - - deletedObj, exists, err := f.knownObjects.GetByKey(k) - if err != nil { - deletedObj = nil - klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) - } else if !exists { - deletedObj = nil - klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + // Delete pre-existing items not in the new list. + // This could happen if watch deletion event was missed while + // disconnected from apiserver. + var deletedObj interface{} + if n := oldItem.Newest(); n != nil { + deletedObj = n.Object + + // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object + if d, ok := deletedObj.(DeletedFinalStateUnknown); ok { + deletedObj = d.Obj + } } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -647,6 +667,32 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { } } + if f.knownObjects != nil { + // Detect deletions for objects not present in the queue, but present in KnownObjects + knownKeys := f.knownObjects.ListKeys() + for _, k := range knownKeys { + if keys.Has(k) { + continue + } + if len(f.items[k]) > 0 { + continue + } + + deletedObj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + deletedObj = nil + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + } else if !exists { + deletedObj = nil + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + } + queuedDeletions++ + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + } + if !f.populated { f.populated = true f.initialPopulationCount = keys.Len() + queuedDeletions diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 8f3313783..dd13c4ea7 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. 
// WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -149,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 9cd476be8..2b335c104 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -23,6 +23,7 @@ import ( "io" "math/rand" "reflect" + "strings" "sync" "time" @@ -40,6 +41,7 @@ import ( "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" + "k8s.io/utils/pointer" "k8s.io/utils/trace" ) @@ -49,12 +51,11 @@ const defaultExpectedTypeName = "" type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -66,17 +67,11 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // backoff manages backoff of ListWatch backoffManager wait.BackoffManager // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. initConnBackoffManager wait.BackoffManager - // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. - MaxInternalErrorRetryDuration time.Duration - - resyncPeriod time.Duration - // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked - ShouldResync func() bool + resyncPeriod time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -91,6 +86,8 @@ type Reflector struct { isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // Called whenever the ListAndWatch drops the connection with an error. 
+ watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") @@ -99,8 +96,19 @@ type Reflector struct { // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 - // Called whenever the ListAndWatch drops the connection with an error. - watchErrorHandler WatchErrorHandler + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. + // Streaming has the primary advantage of using fewer server's resources to fetch data. + // + // The old behaviour establishes a LIST request which gets data in chunks. + // Paginated list is less efficient and depending on the actual size of objects + // might result in an increased memory consumption of the APIServer. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details + UseWatchList bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -131,13 +139,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. - klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } @@ -155,7 +163,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. 
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -165,49 +206,74 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. -func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. 
- backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), } - r.setExpectedType(expectedType) + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) + } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) + } + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) + } + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. - gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil + } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -218,13 +284,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -254,42 +320,75 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. 
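// A small sketch of the constructor introduced above: build a Reflector for Pods with
// ReflectorOptions instead of the positional NewNamedReflector arguments. The clientset,
// store choice and option values are assumptions for illustration only.
package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func startPodReflector(clientset *kubernetes.Clientset, stopCh <-chan struct{}) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	r := cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector",  // defaults to caller file:line when empty
		TypeDescription: "*v1.Pod",        // defaults from the example object when empty
		ResyncPeriod:    10 * time.Minute, // 0 means never resync
	})

	go r.Run(stopCh)
	return store
}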
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) + var err error + var w watch.Interface + fallbackToList := !r.UseWatchList - err := r.list(stopCh) - if err != nil { - return err + if r.UseWatchList { + w, err = r.watchList(stopCh) + if w == nil && err == nil { + // stopCh was closed + return nil + } + if err != nil { + if !apierrors.IsInvalid(err) { + return err + } + klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + fallbackToList = true + // Ensure that we won't accidentally pass some garbage down the watch. + w = nil + } + } + + if fallbackToList { + err = r.list(stopCh) + if err != nil { + return err + } } resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) - go func() { - resyncCh, cleanup := r.resyncChan() - defer func() { - cleanup() // Call the last one written into cleanup - }() - for { - select { - case <-resyncCh: - case <-stopCh: - return - case <-cancelCh: + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + +// startResync periodically calls r.store.Resync() method. +// Note that this method is blocking and should be +// called in a separate goroutine. +func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err return } - if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return - } - } - cleanup() - resyncCh, cleanup = r.resyncChan() } - }() + cleanup() + resyncCh, cleanup = r.resyncChan() + } +} +// watch simply starts a watch request with the server. +func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { + var err error retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) + for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { @@ -298,35 +397,41 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { default: } - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options := metav1.ListOptions{ - ResourceVersion: r.LastSyncResourceVersion(), - // We want to avoid situations of hanging watchers. Stop any watchers that do not - // receive any events within the timeout window. - TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. - // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). 
- AllowWatchBookmarks: true, - } - // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() - w, err := r.listerWatcher.Watch(options) - if err != nil { - // If this is "connection refused" error, it means that most likely apiserver is not responsive. - // It doesn't make sense to re-list all objects because most likely we will be able to restart - // watch where we ended. - // If that's the case begin exponentially backing off and resend watch request. - // Do the same for "429" errors. - if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { - <-r.initConnBackoffManager.Backoff().C() - continue + + if w == nil { + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: r.LastSyncResourceVersion(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, + } + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } + } + return err } - return err } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) + err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + // Ensure that watch will not be reused across iterations. + w.Stop() + w = nil retry.After(err) if err != nil { if err != errorStopRequested { @@ -335,16 +440,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) - <-r.initConnBackoffManager.Backoff().C() - continue + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } case apierrors.IsInternalError(err) && retry.ShouldRetry(): - klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err) + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -421,8 +530,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { } initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err) + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) } // We check if the list was paginated and if so set the paginatedResult based on that. @@ -460,6 +569,114 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { return nil } +// watchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a consistent stream with the server. +// That means the returned data is consistent, as if, served directly from etcd via a quorum read. +// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. +// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +// +// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a stream with the server at the provided resource version. +// To establish the initial state the server begins with synthetic "Added" events. +// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. 
+func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { + var w watch.Interface + var err error + var temporaryStore Store + var resourceVersion string + // TODO(#115478): see if this function could be turned + // into a method and see if error handling + // could be unified with the r.watch method + isErrorRetriableWithSideEffectsFn := func(err error) bool { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) + <-r.initConnBackoffManager.Backoff().C() + return true + } + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + // we tried to re-establish a watch request but the provided RV + // has either expired or it is greater than the server knows about. + // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + bookmarkReceived := pointer.Bool(false) + err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + bookmarkReceived, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if err == errorStopRequested { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if *bookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. + initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + // syncWith replaces the store's items with the given list. 
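// A sketch of the two ways to opt into the alpha watch-list path implemented above:
// either set the exported UseWatchList field on a Reflector you drive yourself, or
// export ENABLE_CLIENT_GO_WATCH_LIST_ALPHA so controller.Run enables it (as in the
// controller.go hunk earlier in this diff). Function names are illustrative.
package main

import (
	"os"

	"k8s.io/client-go/tools/cache"
)

func enableWatchListOnReflector(r *cache.Reflector) {
	// When driving a Reflector directly, flip the exported field before Run.
	r.UseWatchList = true
}

func enableWatchListForInformers() {
	// For informers/controllers, set the alpha env var before they start;
	// controller.Run checks it and turns on UseWatchList on its own Reflector.
	os.Setenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA", "true")
}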
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) @@ -478,15 +695,17 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), + exitOnInitialEventsEndBookmark *bool, clock clock.Clock, errc chan error, stopCh <-chan struct{}, ) error { eventCount := 0 - - // Stopping the watcher should be idempotent and if we return from this function there's no way - // we're coming back in with the same watch interface. - defer w.Stop() + if exitOnInitialEventsEndBookmark != nil { + // set it to false just in case somebody + // made it positive + *exitOnInitialEventsEndBookmark = false + } loop: for { @@ -541,6 +760,11 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion + if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if exitOnInitialEventsEndBookmark != nil { + *exitOnInitialEventsEndBookmark = true + } + } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) } @@ -549,6 +773,11 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ + if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + watchDuration := clock.Since(start) + klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) + return nil + } } } @@ -597,6 +826,18 @@ func (r *Reflector) relistResourceVersion() string { return r.lastSyncResourceVersion } +// rewatchResourceVersion determines the resource version the reflector should start streaming from. +func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned // "expired" or "too large resource version" error. func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { @@ -635,5 +876,25 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. 
+ if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } return false } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index f5c7316a1..a889fdbc3 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" @@ -132,11 +133,13 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - // It returns a registration handle for the handler that can be used to remove - // the handler again. + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means @@ -169,6 +172,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -198,10 +205,7 @@ type SharedInformer interface { // // Must be set before starting the informer. // - // Note: Since the object given to the handler may be already shared with - // other goroutines, it is advisable to copy the object being - // transform before mutating it at all and returning the copy to prevent - // data races. + // Please see the comment on TransformFunc for more details. SetTransform(handler TransformFunc) error // IsStopped reports whether the informer has already been stopped. @@ -213,7 +217,14 @@ type SharedInformer interface { // Opaque interface representing the registration of ResourceEventHandler for // a SharedInformer. Must be supplied back to the same SharedInformer's // `RemoveEventHandler` to unregister the handlers. -type ResourceEventHandlerRegistration interface{} +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. 
+ HasSynced() bool +} // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { @@ -223,14 +234,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. +func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -238,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. 
If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. + ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. @@ -326,12 +364,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration @@ -381,7 +420,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -422,15 +462,17 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: s.indexer, EmitDeltaTypeReplaced: true, + Transformer: s.transform, }) cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, Process: s.HandleDeltas, WatchErrorHandler: s.watchErrorHandler, @@ -559,7 +601,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { return s.processor.addListener(listener), nil @@ -575,27 +617,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). 
+ listener.add(addNotification{newObj: item, isInInitialList: true}) } return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { - return processDeltas(s, s.indexer, s.transform, deltas) + return processDeltas(s, s.indexer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler -func (s *sharedIndexInformer) OnAdd(obj interface{}) { +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) - s.processor.distribute(addNotification{newObj: obj}, false) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler @@ -817,6 +867,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. @@ -847,11 +899,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. +func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -863,6 +922,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -908,7 +970,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 000000000..ce51da9af --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. +type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 000000000..3fa2beb6b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. 
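// An illustrative sketch of the pattern the tracker below is for: the handler marks keys
// from the initial list with Start before queueing them, a worker calls Finished after
// processing, and HasSynced only reports true once the upstream informer is synced and
// all of those keys were processed. The channel-based queue and string keys are
// assumptions made for brevity, not part of this change.
package main

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/cache/synctrack"
)

func wireTracker(informer cache.SharedIndexInformer, queue chan string, stopCh <-chan struct{}) (*synctrack.AsyncTracker[string], error) {
	tracker := &synctrack.AsyncTracker[string]{UpstreamHasSynced: informer.HasSynced}

	_, err := informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			if err != nil {
				return
			}
			if isInInitialList {
				tracker.Start(key)
			}
			queue <- key
		},
	})
	if err != nil {
		return nil, err
	}

	go func() {
		for {
			select {
			case <-stopCh:
				return
			case key := <-queue:
				// ... reconcile the object for key ...
				tracker.Finished(key) // no-op if Start was never called for this key
			}
		}
	}()

	return tracker, nil
}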
+type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. +func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. 
+ if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index c64ba9b26..940e71617 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -64,9 +64,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/utils/clock" - "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -199,9 +198,7 @@ type LeaderElector struct { // stopped holding the leader lease func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() - defer func() { - le.config.Callbacks.OnStoppedLeading() - }() + defer le.config.Callbacks.OnStoppedLeading() if !le.acquire(ctx) { return // ctx signalled done @@ -263,6 +260,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. func (le *LeaderElector) renew(ctx context.Context) { + defer le.config.Lock.RecordEvent("stopped leading") ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { @@ -278,7 +276,6 @@ func (le *LeaderElector) renew(ctx context.Context) { klog.V(5).Infof("successfully renewed lease %v", desc) return } - le.config.Lock.RecordEvent("stopped leading") le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() @@ -295,7 +292,7 @@ func (le *LeaderElector) release() bool { if !le.IsLeader() { return true } - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ LeaderTransitions: le.observedRecord.LeaderTransitions, LeaseDurationSeconds: 1, @@ -315,7 +312,7 @@ func (le *LeaderElector) release() bool { // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. 
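The leaderelection changes in this hunk only touch internals (clock injection and where RecordEvent fires), so for orientation here is a hedged sketch of the package's usual public entry point, RunOrDie, which is not shown in this diff. The lease name, namespace, and timings are illustrative values, and the rest/klog wiring assumes an in-cluster controller.

package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The hostname doubles as the holder identity; any unique string works.
	id, _ := os.Hostname()

	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "example-lock", Namespace: "default"},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				klog.Info("started leading; run controllers here")
				<-ctx.Done()
			},
			OnStoppedLeading: func() {
				klog.Info("stopped leading")
			},
		},
	})
}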
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ HolderIdentity: le.config.Lock.Identity(), LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), @@ -347,7 +344,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { le.observedRawRecord = oldLeaderElectionRawRecord } if len(oldLeaderElectionRecord.HolderIdentity) > 0 && - le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && + le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index c6e23bda1..05b5b2023 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -68,7 +68,7 @@ const ( // name: '*' // namespace: kube-system EndpointsLeasesResourceLock = "endpointsleases" - // When using EndpointsLeasesResourceLock, you need to ensure that + // When using ConfigMapsLeasesResourceLock, you need to ensure that // API Priority & Fairness is configured with non-default flow-schema // that will catch the necessary operations on leader-election related // configmap objects. diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 185ef0e50..8a9d7d60f 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec r.LeaderTransitions = int(*spec.LeaseTransitions) } if spec.AcquireTime != nil { - r.AcquireTime = metav1.Time{spec.AcquireTime.Time} + r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time} } if spec.RenewTime != nil { - r.RenewTime = metav1.Time{spec.RenewTime.Time} + r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } return &r @@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L return coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, - AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time}, - RenewTime: &metav1.MicroTime{ler.RenewTime.Time}, + AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, + RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } } diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go index 6c684c7fa..f36430dc3 100644 --- a/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -58,6 +58,12 @@ type CallsMetric interface { Increment(exitCode int, callStatus string) } +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. 
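A short sketch of plugging into the RetryMetric hook and the RequestRetry registration field added in this metrics.go hunk; logRetries is a made-up implementation (a real one would typically increment a Prometheus counter vector), and since Register is effectively once-only it should be wired up early in main.

package main

import (
	"context"
	"log"

	"k8s.io/client-go/tools/metrics"
)

// logRetries is an illustrative RetryMetric implementation.
type logRetries struct{}

func (logRetries) IncrementRetry(_ context.Context, code, method, host string) {
	log.Printf("client-go retry: code=%s method=%s host=%s", code, method, host)
}

func main() {
	// Only the fields you set are overridden; everything else keeps its no-op default.
	metrics.Register(metrics.RegisterOpts{
		RequestRetry: logRetries{},
	})
}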
+type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -76,6 +82,9 @@ var ( // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by // exit code and call status. ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. @@ -88,6 +97,7 @@ type RegisterOpts struct { RateLimiterLatency LatencyMetric RequestResult ResultMetric ExecPluginCalls CallsMetric + RequestRetry RetryMetric } // Register registers metrics for the rest client to use. This can @@ -118,6 +128,9 @@ func Register(opts RegisterOpts) { if opts.ExecPluginCalls != nil { ExecPluginCalls = opts.ExecPluginCalls } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } }) } @@ -144,3 +157,7 @@ func (noopResult) Increment(context.Context, string, string, string) {} type noopCalls struct{} func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 998bf8dfb..4899b362d 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -17,6 +17,7 @@ limitations under the License. package record import ( + "context" "fmt" "math/rand" "time" @@ -132,7 +133,9 @@ type EventBroadcaster interface { // with the event source set to the given event source. NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder - // Shutdown shuts down the broadcaster + // Shutdown shuts down the broadcaster. Once the broadcaster is shut + // down, it will only try to record an event in a sink once before + // giving up on it with an error message. Shutdown() } @@ -157,31 +160,34 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re // Creates a new event broadcaster. 
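Because the event.go hunk below reworks the broadcaster's sink retry loop around a cancelation context, a minimal usage sketch may help; the EventSinkImpl sink, scheme.Scheme, and the component name are assumptions about the caller's setup, not part of this diff.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
)

// newRecorder returns an EventRecorder plus a cleanup func; calling the
// cleanup (broadcaster.Shutdown) now also stops pending sink retries, per the
// cancelation context wired into recordToSink in this hunk.
func newRecorder(cfg *rest.Config) (record.EventRecorder, func()) {
	client := kubernetes.NewForConfigOrDie(cfg)

	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})

	return recorder, broadcaster.Shutdown
}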
func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: sleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - options: options, + eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) + eventBroadcaster.options = options + return eventBroadcaster +} + +func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: broadcaster, + sleepDuration: sleepDuration, } + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) + return eventBroadcaster } type eventBroadcasterImpl struct { *watch.Broadcaster - sleepDuration time.Duration - options CorrelatorOptions + sleepDuration time.Duration + options CorrelatorOptions + cancelationCtx context.Context + cancel func() } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. @@ -191,15 +197,16 @@ func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interf eventCorrelator := NewEventCorrelatorWithOptions(e.options) return e.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, e.sleepDuration) + e.recordToSink(sink, event, eventCorrelator) }) } func (e *eventBroadcasterImpl) Shutdown() { e.Broadcaster.Shutdown() + e.cancel() } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { +func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -221,12 +228,18 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } + // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. 
+ delay := e.sleepDuration if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) - } else { - time.Sleep(sleepDuration) + delay = time.Duration(float64(delay) * rand.Float64()) + } + select { + case <-e.cancelationCtx.Done(): + klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event) + return + case <-time.After(delay): } } } diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index 0b3f344a9..fda4ad8ff 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -41,20 +41,31 @@ func objectString(object runtime.Object, includeObject bool) string { ) } -func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) +func annotationsString(annotations map[string]string) string { + if len(annotations) == 0 { + return "" + } else { + return " " + fmt.Sprint(annotations) } } -func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) writeEvent(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + + objectString(object, f.IncludeObject) + annotationsString(annotations) } } +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + f.writeEvent(object, nil, eventtype, reason, "%s", message) +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + f.writeEvent(object, nil, eventtype, reason, messageFmt, args...) +} + func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - f.Eventf(object, eventtype, reason, messageFmt, args...) + f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) } // NewFakeRecorder creates new fake event recorder with event channel with diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go index 81d4ff0dd..a2474556b 100644 --- a/vendor/k8s.io/client-go/tools/watch/until.go +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -101,8 +101,7 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions // It guarantees you to see all events and in the order they happened. // Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case. // (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing -// -// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) +// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.) // // The most frequent usage for Until would be a test where you want to verify exact order of events ("edges"). 
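The Until documentation above ends by noting its most frequent use is asserting an exact order of events; below is a hedged sketch of that pattern for waiting on a Pod phase. The ListWatch wrapper, field selector, and two-minute timeout are illustrative choices, assuming a standard typed clientset, and startRV would normally come from the List call that produced the pod.

package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodRunning blocks until the named pod reports Running, observing
// every intermediate event in order (which is why Until is used here).
func waitForPodRunning(ctx context.Context, cs kubernetes.Interface, ns, name, startRV string) error {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	watcher := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = "metadata.name=" + name
			return cs.CoreV1().Pods(ns).Watch(ctx, options)
		},
	}

	_, err := watchtools.Until(ctx, startRV, watcher, func(ev watch.Event) (bool, error) {
		pod, ok := ev.Object.(*corev1.Pod)
		return ok && pod.Status.Phase == corev1.PodRunning, nil
	})
	return err
}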
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) { @@ -137,7 +136,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime. if precondition != nil { if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { - return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err()) + return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err()) } done, err := precondition(indexer) diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 7196cf890..4be1dfe49 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -191,7 +191,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } - if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 26eacc2ba..c1df72030 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -33,38 +33,81 @@ type DelayingInterface interface { AddAfter(item interface{}, duration time.Duration) } +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type DelayingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue Interface +} + // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedDelayingQueue instead. +// NewDelayingQueueWithConfig instead and specify a name. func NewDelayingQueue() DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewWithConfig(QueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. 
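A small sketch of the new config-based delaying constructor defined in this hunk; the queue name and delay are arbitrary example values.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Naming the queue opts it in to workqueue metrics via the global provider.
	q := workqueue.NewDelayingQueueWithConfig(workqueue.DelayingQueueConfig{
		Name: "example-delaying-queue",
	})
	defer q.ShutDown()

	q.AddAfter("retry-me", 50*time.Millisecond)

	item, shutdown := q.Get() // blocks until the delay elapses
	if !shutdown {
		fmt.Println("got:", item)
		q.Done(item)
	}
}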
func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, q, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) } -// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewNamedDelayingQueue(name string) DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue -// with ability to inject real or fake clock for testing purposes +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { - return newDelayingQueue(clock, NewNamed(name), name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { ret := &delayingType{ Interface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 4b0a69616..f012ccc55 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -244,13 +244,18 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu } } -func newRetryMetrics(name string) retryMetrics { +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + retries: provider.NewRetriesMetric(name), } } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 6f7063269..380c06455 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -33,17 +33,60 @@ type Interface interface { ShuttingDown() bool } +// QueueConfig specifies optional configurations to customize an Interface. +type QueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. + Clock clock.WithTicker +} + // New constructs a new work queue (see the package comment). func New() *Type { - return NewNamed("") + return NewWithConfig(QueueConfig{ + Name: "", + }) } +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. 
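For completeness, the same config pattern applies to the plain queue through NewWithConfig, defined just below; this sketch shows the usual Add/Get/Done worker loop with an illustrative queue name.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "example"})

	go func() {
		for _, key := range []string{"default/a", "default/b"} {
			q.Add(key)
		}
		// Get keeps draining already-queued items after ShutDown, then reports shutdown.
		q.ShutDown()
	}()

	for {
		key, shutdown := q.Get()
		if shutdown {
			return
		}
		fmt.Println("processing", key)
		q.Done(key) // always pair Get with Done
	}
}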
+func NewWithConfig(config QueueConfig) *Type { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. func NewNamed(name string) *Type { - rc := clock.RealClock{} + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + return newQueue( - rc, - globalMetricsFactory.newQueueMetrics(name, rc), - defaultUnfinishedWorkUpdatePeriod, + config.Clock, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, ) } diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 91cd33f19..3e4016fb0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -16,6 +16,8 @@ limitations under the License. package workqueue +import "k8s.io/utils/clock" + // RateLimitingInterface is an interface that rate limits items being added to the queue. type RateLimitingInterface interface { DelayingInterface @@ -32,29 +34,68 @@ type RateLimitingInterface interface { NumRequeues(item interface{}) int } +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. + +type RateLimitingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue DelayingInterface +} + // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedRateLimitingQueue instead. +// NewRateLimitingQueueWithConfig instead and specify a name. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. 
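And the rate-limited variant, matching NewRateLimitingQueueWithConfig whose body follows; the rate limiter choice and the key are illustrative, and in a real controller Forget is called only after a successful reconcile.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// DefaultControllerRateLimiter combines per-item exponential backoff with
	// an overall token bucket; the config here only names the queue.
	q := workqueue.NewRateLimitingQueueWithConfig(
		workqueue.DefaultControllerRateLimiter(),
		workqueue.RateLimitingQueueConfig{Name: "example-controller"},
	)
	defer q.ShutDown()

	key := "default/my-object"
	q.AddRateLimited(key) // requeue with backoff on failure

	item, _ := q.Get()
	fmt.Println("processing", item)
	q.Forget(item) // success: reset the per-item backoff
	q.Done(item)
}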
+func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), + DelayingInterface: config.DelayingQueue, rateLimiter: rateLimiter, } } +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) } +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: di, - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go index aad605eee..e1b9469d7 100644 --- a/vendor/k8s.io/component-base/config/types.go +++ b/vendor/k8s.io/component-base/config/types.go @@ -74,7 +74,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling bool - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. EnableContentionProfiling bool } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/types.go b/vendor/k8s.io/component-base/config/v1alpha1/types.go index c9d05525d..3c5f004f2 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/types.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -60,7 +60,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling *bool `json:"enableProfiling,omitempty"` - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. 
EnableContentionProfiling *bool `json:"enableContentionProfiling,omitempty"` } diff --git a/vendor/modules.txt b/vendor/modules.txt index a7946c094..1d2b86065 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -415,16 +415,16 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.14.0 +# github.com/prometheus/client_golang v1.15.1 ## explicit; go 1.17 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.4.0 +## explicit; go 1.18 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.41.0 +# github.com/prometheus/common v0.42.0 ## explicit; go 1.18 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -590,8 +590,8 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# gomodules.xyz/jsonpatch/v2 v2.2.0 -## explicit; go 1.12 +# gomodules.xyz/jsonpatch/v2 v2.3.0 +## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 # gomodules.xyz/jsonpatch/v3 v3.0.1 ## explicit; go 1.12 @@ -663,8 +663,8 @@ gotest.tools/v3/internal/assert gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -# k8s.io/api v0.26.3 -## explicit; go 1.19 +# k8s.io/api v0.27.2 +## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -687,6 +687,7 @@ k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/certificates/v1 +k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 @@ -712,14 +713,14 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha1 +k8s.io/api/resource/v1alpha2 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.26.2 => k8s.io/apiextensions-apiserver v0.23.4 +# k8s.io/apiextensions-apiserver v0.27.2 => k8s.io/apiextensions-apiserver v0.23.4 ## explicit; go 1.16 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -780,8 +781,8 @@ k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/cli-runtime v0.26.3 ## explicit; go 1.19 k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.26.3 -## explicit; go 1.19 +# k8s.io/client-go v0.27.2 +## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -796,6 +797,7 @@ k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 k8s.io/client-go/applyconfigurations/batch/v1beta1 k8s.io/client-go/applyconfigurations/certificates/v1 +k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 @@ -822,7 +824,7 @@ 
k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha1 +k8s.io/client-go/applyconfigurations/resource/v1alpha2 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -854,6 +856,7 @@ k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1 +k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 @@ -878,7 +881,7 @@ k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/resource/v1alpha1 +k8s.io/client-go/kubernetes/typed/resource/v1alpha2 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -905,6 +908,7 @@ k8s.io/client-go/restmapper k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -924,8 +928,8 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.26.3 -## explicit; go 1.19 +# k8s.io/component-base v0.27.2 +## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 # k8s.io/klog/v2 v2.90.1 @@ -964,8 +968,8 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.14.6 -## explicit; go 1.19 +# sigs.k8s.io/controller-runtime v0.15.0 +## explicit; go 1.20 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder sigs.k8s.io/controller-runtime/pkg/cache @@ -976,6 +980,7 @@ sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/client/interceptor sigs.k8s.io/controller-runtime/pkg/cluster sigs.k8s.io/controller-runtime/pkg/config sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 @@ -992,6 +997,7 @@ sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/manager @@ -1001,10 +1007,8 @@ sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder -sigs.k8s.io/controller-runtime/pkg/runtime/inject sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source 
-sigs.k8s.io/controller-runtime/pkg/source/internal sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml index 209b7f4e6..817c2c723 100644 --- a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -1,38 +1,44 @@ linters: disable-all: true enable: - - asciicheck - - bodyclose - - depguard - - dogsled - - errcheck - - errorlint - - exportloopref - - goconst - - gocritic - - gocyclo - - gofmt - - goimports - - goprintffuncname - - gosec - - gosimple - - govet - - importas - - ineffassign - - misspell - - nakedret - - nilerr - - nolintlint - - prealloc - - revive - - rowserrcheck - - staticcheck - - stylecheck - - typecheck - - unconvert - - unparam - - unused - - whitespace + - asasalint + - asciicheck + - bidichk + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - errchkjson + - errorlint + - exhaustive + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - staticcheck + - stylecheck + - tagliatelle + - typecheck + - unconvert + - unparam + - unused + - whitespace linters-settings: importas: @@ -53,13 +59,42 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl staticcheck: - go: "1.19" + go: "1.20" stylecheck: - go: "1.19" + go: "1.20" depguard: include-go-root: true packages: - io/ioutil # https://go.dev/doc/go1.16#ioutil + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: superfluous-else + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr issues: max-same-issues: 0 @@ -69,68 +104,71 @@ issues: exclude-use-default: false # List of regexps of issue texts to exclude, empty list by default. exclude: - # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. - # If it is decided they will not be addressed they should be moved above this comment. - - Subprocess launch(ed with variable|ing should be audited) - - (G204|G104|G307) - - "ST1000: at least one file in a package should have a package comment" + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. 
+ - Subprocess launch(ed with variable|ing should be audited) + - (G204|G104|G307) + - "ST1000: at least one file in a package should have a package comment" exclude-rules: - - linters: - - gosec - text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" - - linters: - - revive - text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" - - linters: - - errcheck - text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # With Go 1.16, the new embed directive can be used with an un-named import, - # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. - # This directive allows the embed package to be imported with an underscore everywhere. - - linters: - - revive - source: _ "embed" - # Exclude some packages or code to require comments, for example test code, or fake clients. - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - source: (func|type).*Fake.* - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: fake_\.go - # Disable unparam "always receives" which might not be really - # useful when building libraries. - - linters: - - unparam - text: always receives - # Dot imports for gomega or ginkgo are allowed - # within test files. - - path: _test\.go - text: should not use dot imports - - path: _test\.go - text: cyclomatic complexity - - path: _test\.go - text: "G107: Potential HTTP request made with variable url" - # Append should be able to assign to a different var/slice. - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - - linters: - - gocritic - text: "singleCaseSwitch: should rewrite switch statement to if statement" - # It considers all file access to a filename that comes from a variable problematic, - # which is naiv at best. - - linters: - - gosec - text: "G304: Potential file inclusion via variable" - - linters: - - revive - text: "package-comments: should have a package comment" + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - linters: + - staticcheck + text: "SA1019: .*The component config package has been deprecated and will be removed in a future release." + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. 
+ - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + path: fake_\.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega and ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: "G107: Potential HTTP request made with variable url" + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + - linters: + - gocritic + text: "singleCaseSwitch: should rewrite switch statement to if statement" + # It considers all file access to a filename that comes from a variable problematic, + # which is naiv at best. + - linters: + - gosec + text: "G304: Potential file inclusion via variable" + - linters: + - dupl + path: _test\.go run: timeout: 10m skip-files: - - "zz_generated.*\\.go$" - - ".*conversion.*\\.go$" + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" allow-parallel-runners: true diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile index 36647c697..71ec644de 100644 --- a/vendor/sigs.k8s.io/controller-runtime/Makefile +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -75,7 +75,7 @@ $(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. $(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golanci-lint using hack script into tools folder. hack/ensure-golangci-lint.sh \ -b $(TOOLS_BIN_DIR) \ - $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //') + $(shell cat .github/workflows/golangci-lint.yml | grep "version: v" | sed 's/.*version: //') ## -------------------------------------- ## Linting @@ -117,7 +117,15 @@ clean-bin: ## Remove all generated binaries. 
rm -rf hack/tools/bin .PHONY: verify-modules -verify-modules: modules - @if !(git diff --quiet HEAD -- go.sum go.mod); then \ +verify-modules: modules ## Verify go modules are up to date + @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(ENVTEST_DIR)/go.mod $(ENVTEST_DIR)/go.sum); then \ + git diff; \ echo "go module files are out of date, please run 'make modules'"; exit 1; \ fi + +.PHONY: verify-generate +verify-generate: generate ## Verify generated files are up to date + @if !(git diff --quiet HEAD); then \ + git diff; \ + echo "generated files are out of date, run make generate"; exit 1; \ + fi diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES index 710894784..7848941d5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -9,23 +9,21 @@ aliases: # non-admin folks who have write-access and can approve any PRs in the repo controller-runtime-maintainers: - - vincepri + - alvaroaleman - joelanford + - sbueringer + - vincepri # non-admin folks who can approve any PRs in the repo controller-runtime-approvers: - - alvaroaleman - fillzpp - - sbueringer # folks who can review and LGTM any PRs in the repo (doesn't # include approvers & admins -- those count too via the OWNERS # file) controller-runtime-reviewers: - - vincepri - varshaprasad96 - - fillzpp - - sbueringer + - inteon # folks to can approve things in the directly-ported # testing_frameworks portions of the codebase diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md index 484881dce..e785abdd7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/README.md +++ b/vendor/sigs.k8s.io/controller-runtime/README.md @@ -16,8 +16,8 @@ Documentation: - [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) - [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) - [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) -- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) -- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/master/designs) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/main/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/main/designs) # Versioning, Maintenance, and Compatibility @@ -27,7 +27,7 @@ Users: - We follow [Semantic Versioning (semver)](https://semver.org) - Use releases with your dependency management to ensure that you get compatible code -- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) +- The main branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) Contributors: diff --git a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md index 134a73a31..f234494fe 100644 --- a/vendor/sigs.k8s.io/controller-runtime/RELEASE.md +++ b/vendor/sigs.k8s.io/controller-runtime/RELEASE.md @@ -10,7 +10,7 @@ to create a new branch you will just need to ensure that all big fixes are cherr ### Create the new branch and the release tag -1. Create a new branch `git checkout -b release-` from master +1. Create a new branch `git checkout -b release-` from main 2. 
Push the new branch to the remote repository ### Now, let's generate the changelog diff --git a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS index 32e6a3b90..9c5241c6b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS +++ b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS @@ -10,5 +10,6 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -pwittrock -droot +alvaroaleman +sbueringer +vincepri diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go index 35cba30be..237963889 100644 --- a/vendor/sigs.k8s.io/controller-runtime/alias.go +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -99,6 +99,8 @@ var ( // ConfigFile returns the cfg.File function for deferred config file loading, // this is passed into Options{}.From() to populate the Options fields for // the manager. + // + // Deprecated: This is deprecated in favor of using Options directly. ConfigFile = cfg.File // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. @@ -139,7 +141,7 @@ var ( // The logger, when used with controllers, can be expected to contain basic information about the object // that's being reconciled like: // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. - // - `name` and `namespace` injected from the reconciliation request. + // - `name` and `namespace` from the reconciliation request. // // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. LoggerFrom = log.FromContext diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go index fa6c532c4..0319bc3ff 100644 --- a/vendor/sigs.k8s.io/controller-runtime/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -46,13 +46,13 @@ limitations under the License. // // Frequently asked questions about using controller-runtime and designing // controllers can be found at -// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// https://github.com/kubernetes-sigs/controller-runtime/blob/main/FAQ.md. // // # Managers // // Every controller and webhook is ultimately run by a Manager (pkg/manager). A // manager is responsible for running controllers and webhooks, and setting up -// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// common dependencies, like shared caches and clients, as // well as managing leader election (pkg/leaderelection). Managers are // generally configured to gracefully shut down controllers on pod termination // by wiring up a signal handler (pkg/manager/signals). 
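The doc.go paragraph above summarizes what a Manager owns; as a point of reference, here is a hedged end-to-end sketch using the v0.15-style builder signatures that appear later in this diff (Watches now takes the object directly instead of a *source.Kind). The no-op reconciler and the empty Options are placeholders, not recommendations.

package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func main() {
	// The manager wires up shared caches, clients, leader election, and signal
	// handling for every controller and webhook registered against it.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}

	r := reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
		// req carries the name and namespace of the object to reconcile.
		return reconcile.Result{}, nil
	})

	err = ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}).
		Owns(&corev1.Pod{}).
		// v0.15 signature: pass the object and handler, not a *source.Kind.
		Watches(&corev1.ConfigMap{}, &handler.EnqueueRequestForObject{}).
		Complete(r)
	if err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}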
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go index 03f9633a7..570cfd63d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" + internalsource "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -96,14 +97,20 @@ func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { // OwnsInput represents the information set by Owns method. type OwnsInput struct { + matchEveryOwner bool object client.Object predicates []predicate.Predicate objectProjection objectProjection } // Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to -// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling -// Watches(&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}). +// create / delete / update events by *reconciling the owner object*. +// +// The default behavior reconciles only the first controller-type OwnerReference of the given type. +// Use Owns(object, builder.MatchEveryOwner) to reconcile all owners. +// +// By default, this is the equivalent of calling +// Watches(object, handler.EnqueueRequestForOwner([...], ownerType, OnlyControllerOwner())). func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { input := OwnsInput{object: object} for _, opt := range opts { @@ -122,10 +129,54 @@ type WatchesInput struct { objectProjection objectProjection } -// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using -// Owns or For instead of Watches directly. +// Watches defines the type of Object to watch, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object* with the given EventHandler. +// +// This is the equivalent of calling +// WatchesRawSource(source.Kind(scheme, object), eventhandler, opts...). +func (blder *Builder) Watches(object client.Object, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + src := source.Kind(blder.mgr.GetCache(), object) + return blder.WatchesRawSource(src, eventhandler, opts...) +} + +// WatchesMetadata is the same as Watches, but forces the internal cache to only watch PartialObjectMetadata. +// +// This is useful when watching lots of objects, really big objects, or objects for which you only know +// the GVK, but not the structure. You'll need to pass metav1.PartialObjectMetadata to the client +// when fetching objects in your reconciler, otherwise you'll end up with a duplicate structured or unstructured cache. +// +// When watching a resource with metadata only, for example the v1.Pod, you should not Get and List using the v1.Pod type. +// Instead, you should use the special metav1.PartialObjectMetadata type. 
+// +// ❌ Incorrect: +// +// pod := &v1.Pod{} +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// ✅ Correct: +// +// pod := &metav1.PartialObjectMetadata{} +// pod.SetGroupVersionKind(schema.GroupVersionKind{ +// Group: "", +// Version: "v1", +// Kind: "Pod", +// }) +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// In the first case, controller-runtime will create another cache for the +// concrete type on top of the metadata cache; this increases memory +// consumption and leads to race conditions as caches are not in sync. +func (blder *Builder) WatchesMetadata(object client.Object, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + opts = append(opts, OnlyMetadata) + return blder.Watches(object, eventhandler, opts...) +} + +// WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder. // Specified predicates are registered only for given source. -func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { +// +// STOP! Consider using For(...), Owns(...), Watches(...), WatchesMetadata(...) instead. +// This method is only exposed for more advanced use cases, most users should use higher level functions. +func (blder *Builder) WatchesRawSource(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { input := WatchesInput{src: src, eventhandler: eventhandler} for _, opt := range opts { opt.ApplyToWatches(&input) @@ -217,11 +268,11 @@ func (blder *Builder) project(obj client.Object, proj objectProjection) (client. func (blder *Builder) doWatch() error { // Reconcile type if blder.forInput.object != nil { - typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + obj, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) if err != nil { return err } - src := &source.Kind{Type: typeForSrc} + src := source.Kind(blder.mgr.GetCache(), obj) hdler := &handler.EnqueueRequestForObject{} allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { @@ -234,15 +285,20 @@ func (blder *Builder) doWatch() error { return errors.New("Owns() can only be used together with For()") } for _, own := range blder.ownsInput { - typeForSrc, err := blder.project(own.object, own.objectProjection) + obj, err := blder.project(own.object, own.objectProjection) if err != nil { return err } - src := &source.Kind{Type: typeForSrc} - hdler := &handler.EnqueueRequestForOwner{ - OwnerType: blder.forInput.object, - IsController: true, + src := source.Kind(blder.mgr.GetCache(), obj) + opts := []handler.OwnerOption{} + if !own.matchEveryOwner { + opts = append(opts, handler.OnlyControllerOwner()) } + hdler := handler.EnqueueRequestForOwner( + blder.mgr.GetScheme(), blder.mgr.GetRESTMapper(), + blder.forInput.object, + opts..., + ) allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, own.predicates...) if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { @@ -258,8 +314,8 @@ func (blder *Builder) doWatch() error { allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) - // If the source of this watch is of type *source.Kind, project it. - if srckind, ok := w.src.(*source.Kind); ok { + // If the source of this watch is of type Kind, project it. 
+ if srckind, ok := w.src.(*internalsource.Kind); ok { typeForSrc, err := blder.project(srckind.Type, w.objectProjection) if err != nil { return err @@ -314,8 +370,8 @@ func (blder *Builder) doController(r reconcile.Reconciler) error { } // Setup cache sync timeout. - if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil { - ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout > 0 { + ctrlOptions.CacheSyncTimeout = globalOpts.CacheSyncTimeout } controllerName, err := blder.getControllerName(gvk, hasGVK) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go index 3a66491bf..bce2065ef 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -138,3 +138,19 @@ var ( ) // }}} + +// MatchEveryOwner determines whether the watch should be filtered based on +// controller ownership. As in, when the OwnerReference.Controller field is set. +// +// If passed as an option, +// the handler receives notification for every owner of the object with the given type. +// If unset (default), the handler receives notification only for the first +// OwnerReference with `Controller: true`. +var MatchEveryOwner = &matchEveryOwner{} + +type matchEveryOwner struct{} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (o matchEveryOwner) ApplyToOwns(opts *OwnsInput) { + opts.matchEveryOwner = true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go index 534e6d64c..82fac4d8f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -22,9 +22,12 @@ import ( "net/url" "strings" + "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -33,13 +36,14 @@ import ( // WebhookBuilder builds a Webhook. type WebhookBuilder struct { - apiType runtime.Object - withDefaulter admission.CustomDefaulter - withValidator admission.CustomValidator - gvk schema.GroupVersionKind - mgr manager.Manager - config *rest.Config - recoverPanic bool + apiType runtime.Object + withDefaulter admission.CustomDefaulter + withValidator admission.CustomValidator + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config + recoverPanic bool + logConstructor func(base logr.Logger, req *admission.Request) logr.Logger } // WebhookManagedBy allows inform its manager.Manager. @@ -69,6 +73,12 @@ func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) return blder } +// WithLogConstructor overrides the webhook's LogConstructor. +func (blder *WebhookBuilder) WithLogConstructor(logConstructor func(base logr.Logger, req *admission.Request) logr.Logger) *WebhookBuilder { + blder.logConstructor = logConstructor + return blder +} + // RecoverPanic indicates whether the panic caused by webhook should be recovered. 
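Given the WebhookBuilder changes above (notably WithLogConstructor and the scheme-aware admission constructors), a hedged sketch of the builder in use follows; podDefaulter and the label it sets are invented for illustration, and real projects usually target their own API types rather than core Pods.

package example

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podDefaulter is an illustrative CustomDefaulter.
type podDefaulter struct{}

func (podDefaulter) Default(_ context.Context, obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod but got %T", obj)
	}
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	pod.Labels["example.com/defaulted"] = "true"
	return nil
}

func setupPodWebhook(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&corev1.Pod{}).
		WithDefaulter(podDefaulter{}).
		// Mirrors the default per-request log constructor wired up in this hunk.
		WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger {
			if req != nil {
				return base.WithValues("webhook", "pod-defaulter", "namespace", req.Namespace, "name", req.Name)
			}
			return base.WithValues("webhook", "pod-defaulter")
		}).
		Complete()
}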
func (blder *WebhookBuilder) RecoverPanic() *WebhookBuilder { blder.recoverPanic = true @@ -80,6 +90,9 @@ func (blder *WebhookBuilder) Complete() error { // Set the Config blder.loadRestConfig() + // Configure the default LogConstructor + blder.setLogConstructor() + // Set the Webhook if needed return blder.registerWebhooks() } @@ -90,6 +103,26 @@ func (blder *WebhookBuilder) loadRestConfig() { } } +func (blder *WebhookBuilder) setLogConstructor() { + if blder.logConstructor == nil { + blder.logConstructor = func(base logr.Logger, req *admission.Request) logr.Logger { + log := base.WithValues( + "webhookGroup", blder.gvk.Group, + "webhookKind", blder.gvk.Kind, + ) + if req != nil { + return log.WithValues( + blder.gvk.Kind, klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) + } + return log + } + } +} + func (blder *WebhookBuilder) registerWebhooks() error { typ, err := blder.getType() if err != nil { @@ -116,6 +149,7 @@ func (blder *WebhookBuilder) registerWebhooks() error { func (blder *WebhookBuilder) registerDefaultingWebhook() { mwh := blder.getDefaultingWebhook() if mwh != nil { + mwh.LogConstructor = blder.logConstructor path := generateMutatePath(blder.gvk) // Checking if the path is already registered. @@ -131,10 +165,10 @@ func (blder *WebhookBuilder) registerDefaultingWebhook() { func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { if defaulter := blder.withDefaulter; defaulter != nil { - return admission.WithCustomDefaulter(blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic) + return admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, defaulter).WithRecoverPanic(blder.recoverPanic) } if defaulter, ok := blder.apiType.(admission.Defaulter); ok { - return admission.DefaultingWebhookFor(defaulter).WithRecoverPanic(blder.recoverPanic) + return admission.DefaultingWebhookFor(blder.mgr.GetScheme(), defaulter).WithRecoverPanic(blder.recoverPanic) } log.Info( "skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called", @@ -145,6 +179,7 @@ func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook { func (blder *WebhookBuilder) registerValidatingWebhook() { vwh := blder.getValidatingWebhook() if vwh != nil { + vwh.LogConstructor = blder.logConstructor path := generateValidatePath(blder.gvk) // Checking if the path is already registered. 
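The webhook builder above now threads the manager's scheme into admission.WithCustomDefaulter / DefaultingWebhookFor and lets callers override the per-request logger via WithLogConstructor (falling back to setLogConstructor's default otherwise). A rough caller-side sketch under those v0.15 signatures; jobDefaulter and its defaulting logic are hypothetical.

package example

import (
	"context"

	"github.com/go-logr/logr"
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// jobDefaulter is a hypothetical CustomDefaulter; Complete() wires it through
// admission.WithCustomDefaulter(scheme, obj, defaulter) as shown above.
type jobDefaulter struct{}

func (jobDefaulter) Default(_ context.Context, obj runtime.Object) error {
	job, ok := obj.(*batchv1.Job)
	if !ok {
		return nil
	}
	if job.Spec.BackoffLimit == nil {
		limit := int32(3)
		job.Spec.BackoffLimit = &limit
	}
	return nil
}

func setupWebhook(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&batchv1.Job{}).
		WithDefaulter(jobDefaulter{}).
		// Override the default LogConstructor installed by setLogConstructor.
		WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger {
			if req == nil {
				return base.WithName("job-webhook")
			}
			return base.WithName("job-webhook").WithValues("op", req.Operation)
		}).
		Complete()
}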
@@ -160,10 +195,10 @@ func (blder *WebhookBuilder) registerValidatingWebhook() { func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { if validator := blder.withValidator; validator != nil { - return admission.WithCustomValidator(blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) + return admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, validator).WithRecoverPanic(blder.recoverPanic) } if validator, ok := blder.apiType.(admission.Validator); ok { - return admission.ValidatingWebhookFor(validator).WithRecoverPanic(blder.recoverPanic) + return admission.ValidatingWebhookFor(blder.mgr.GetScheme(), validator).WithRecoverPanic(blder.recoverPanic) } log.Info( "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", @@ -179,7 +214,7 @@ func (blder *WebhookBuilder) registerConversionWebhook() error { } if ok { if !blder.isAlreadyHandled("/convert") { - blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + blder.mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(blder.mgr.GetScheme())) } log.Info("Conversion webhook enabled", "GVK", blder.gvk) } @@ -195,10 +230,10 @@ func (blder *WebhookBuilder) getType() (runtime.Object, error) { } func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { - if blder.mgr.GetWebhookServer().WebhookMux == nil { + if blder.mgr.GetWebhookServer().WebhookMux() == nil { return false } - h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + h, p := blder.mgr.GetWebhookServer().WebhookMux().Handler(&http.Request{URL: &url.URL{Path: path}}) if p == path && h != nil { return true } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index bcb1141a5..f01de4381 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -19,10 +19,11 @@ package cache import ( "context" "fmt" - "reflect" + "net/http" "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +38,10 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.RuntimeLog.WithName("object-cache") +var ( + log = logf.RuntimeLog.WithName("object-cache") + defaultSyncPeriod = 10 * time.Hour +) // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), @@ -98,310 +102,152 @@ type Informer interface { HasSynced() bool } -// ObjectSelector is an alias name of internal.Selector. -type ObjectSelector internal.Selector - -// SelectorsByObject associate a client.Object's GVK to a field/label selector. -// There is also `DefaultSelector` to set a global default (which will be overridden by -// a more specific setting here, if any). -type SelectorsByObject map[client.Object]ObjectSelector - // Options are the optional arguments for creating a new InformersMap object. type Options struct { + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources Mapper meta.RESTMapper - // Resync is the base frequency the informers are resynced. 
- // Defaults to defaultResyncTime. - // A 10 percent jitter will be added to the Resync period between informers - // So that all informers will not send list requests simultaneously. - Resync *time.Duration - - // Namespace restricts the cache's ListWatch to the desired namespace + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration + + // Namespaces restricts the cache's ListWatch to the desired namespaces // Default watches all namespaces - Namespace string + Namespaces []string - // SelectorsByObject restricts the cache's ListWatch to the desired - // fields per GVK at the specified object, the map's value must implement - // Selector [1] using for example a Set [2] - // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector - // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set - SelectorsByObject SelectorsByObject + // DefaultLabelSelector will be used as a label selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultLabelSelector labels.Selector - // DefaultSelector will be used as selectors for all object types - // that do not have a selector in SelectorsByObject defined. - DefaultSelector ObjectSelector + // DefaultFieldSelector will be used as a field selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultFieldSelector fields.Selector - // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or - // list objects per GVK at the specified object. + // DefaultTransform will be used as transform for all object types + // unless they have a more specific transform set in ByObject. + DefaultTransform toolscache.TransformFunc + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + ByObject map[client.Object]ByObject + + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects for EVERY object. // Be very careful with this, when enabled you must DeepCopy any object before mutating it, // otherwise you will mutate the object in the cache. 
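The SyncPeriod comment above recommends returning a constant RequeueAfter from Reconcile rather than shortening the global resync. A small illustrative reconciler following that advice; the 5-minute interval is an arbitrary example value.

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

type pollingReconciler struct{}

// Reconcile re-queues itself on a fixed interval instead of relying on a
// shorter cache SyncPeriod.
func (pollingReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// ... reconcile the object identified by req ...
	return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil
}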
- UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + // + // This is a global setting for all objects, and can be overridden by the ByObject setting. + UnsafeDisableDeepCopy *bool +} - // TransformByObject is a map from GVKs to transformer functions which +// ByObject offers more fine-grained control over the cache's ListWatch by object. +type ByObject struct { + // Label represents a label selector for the object. + Label labels.Selector + + // Field represents a field selector for the object. + Field fields.Selector + + // Transform is a map from objects to transformer functions which // get applied when objects of the transformation are about to be committed // to cache. // // This function is called both for new objects to enter the cache, - // and for updated objects. - TransformByObject TransformByObject + // and for updated objects. + Transform toolscache.TransformFunc - // DefaultTransform is the transform used for all GVKs which do - // not have an explicit transform func set in TransformByObject - DefaultTransform toolscache.TransformFunc + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopy *bool } -var defaultResyncTime = 10 * time.Hour +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. func New(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err + if len(opts.Namespaces) == 0 { + opts.Namespaces = []string{metav1.NamespaceAll} } - selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) - if err != nil { - return nil, err - } - disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) - if err != nil { - return nil, err - } - transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) - if err != nil { - return nil, err - } - transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK) - - internalSelectorsByGVK := internal.SelectorsByGVK{} - for gvk, selector := range selectorsByGVK { - internalSelectorsByGVK[gvk] = internal.Selector(selector) - } - - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj) - return &informerCache{InformersMap: im}, nil -} - -// BuilderWithOptions returns a Cache constructor that will build a cache -// honoring the options argument, this is useful to specify options like -// SelectorsByObject -// WARNING: If SelectorsByObject is specified, filtered out resources are not -// returned. -// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object -// returned from cache get/list before mutating it. 
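Options and ByObject above replace SelectorsByObject, DefaultSelector and UnsafeDisableDeepCopyByObject. A hedged sketch of building a cache with the new shape; the namespaces and selectors are placeholder values, and per the per-GVK defaulting a type with its own ByObject entry does not inherit the default label selector.

package example

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newFilteredCache() (cache.Cache, error) {
	syncPeriod := 10 * time.Hour
	return cache.New(ctrl.GetConfigOrDie(), cache.Options{
		Scheme:     scheme.Scheme,
		SyncPeriod: &syncPeriod,
		// More than one entry routes New to the multi-namespace cache.
		Namespaces: []string{"team-a", "team-b"},
		// Applies to every type without a more specific ByObject entry.
		DefaultLabelSelector: labels.SelectorFromSet(labels.Set{"managed-by": "my-operator"}),
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Pod{}: {
				Field: fields.OneTermEqualSelector("status.phase", "Running"),
			},
		},
	})
}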
-func BuilderWithOptions(options Options) NewCacheFunc { - return func(config *rest.Config, inherited Options) (Cache, error) { - var err error - inherited, err = defaultOpts(config, inherited) - if err != nil { - return nil, err - } - options, err = defaultOpts(config, options) - if err != nil { - return nil, err - } - combined, err := options.inheritFrom(inherited) - if err != nil { - return nil, err - } - return New(config, *combined) + if len(opts.Namespaces) > 1 { + return newMultiNamespaceCache(config, opts) } -} -func (options Options) inheritFrom(inherited Options) (*Options, error) { - var ( - combined Options - err error - ) - combined.Scheme = combineScheme(inherited.Scheme, options.Scheme) - combined.Mapper = selectMapper(inherited.Mapper, options.Mapper) - combined.Resync = selectResync(inherited.Resync, options.Resync) - combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace) - combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme) + opts, err := defaultOpts(config, opts) if err != nil { return nil, err } - return &combined, nil -} - -func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme { - var out *runtime.Scheme - for _, sch := range schemes { - if sch == nil { - continue - } - for gvk, t := range sch.AllKnownTypes() { - if out == nil { - out = runtime.NewScheme() - } - out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object)) - } - } - return out -} - -func selectMapper(def, override meta.RESTMapper) meta.RESTMapper { - if override != nil { - return override - } - return def -} - -func selectResync(def, override *time.Duration) *time.Duration { - if override != nil { - return override - } - return def -} - -func selectNamespace(def, override string) string { - if override != "" { - return override - } - return def -} - -func combineSelectors(inherited, options Options, scheme *runtime.Scheme) (SelectorsByObject, ObjectSelector, error) { - // Selectors are combined via logical AND. - // - Combined label selector is a union of the selectors requirements from both sets of options. - // - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors - // defined in both sets of options. 
- // - // There is a bunch of complexity here because we need to convert to SelectorsByGVK - // to be able to match keys between options and inherited and then convert back to SelectorsByObject - optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - - for gvk, inheritedSelector := range inheritedSelectorsByGVK { - optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk]) - } - return convertToByObject(optionsSelectorsByGVK, scheme) -} - -func combineSelector(selectors ...ObjectSelector) ObjectSelector { - ls := make([]labels.Selector, 0, len(selectors)) - fs := make([]fields.Selector, 0, len(selectors)) - for _, s := range selectors { - ls = append(ls, s.Label) - fs = append(fs, s.Field) - } - return ObjectSelector{ - Label: combineLabelSelectors(ls...), - Field: combineFieldSelectors(fs...), - } -} - -func combineLabelSelectors(ls ...labels.Selector) labels.Selector { - var combined labels.Selector - for _, l := range ls { - if l == nil { - continue - } - if combined == nil { - combined = labels.NewSelector() - } - reqs, _ := l.Requirements() - combined = combined.Add(reqs...) - } - return combined -} -func combineFieldSelectors(fs ...fields.Selector) fields.Selector { - nonNil := fs[:0] - for _, f := range fs { - if f == nil { - continue - } - nonNil = append(nonNil, f) - } - if len(nonNil) == 0 { - return nil - } - if len(nonNil) == 1 { - return nonNil[0] - } - return fields.AndSelectors(nonNil...) -} - -func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - // UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset - // in options will a value from inherited be used. - optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme) - if err != nil { - return nil, err - } - inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme) + byGVK, err := convertToInformerOptsByGVK(opts.ByObject, opts.Scheme) if err != nil { return nil, err } - - for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK { - if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok { - if optionsDisableDeepCopyByGVK == nil { - optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{} - } - optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy - } - } - return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme) + // Set the default selector and transform. 
+ byGVK[schema.GroupVersionKind{}] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Label: opts.DefaultLabelSelector, + Field: opts.DefaultFieldSelector, + }, + Transform: opts.DefaultTransform, + UnsafeDisableDeepCopy: opts.UnsafeDisableDeepCopy, + } + + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(config, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: opts.Namespaces[0], + ByGVK: byGVK, + }), + }, nil } -func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) { - // Transform functions are combined via chaining. If both inherited and options define a transform - // function, the transform function from inherited will be called first, and the transform function from - // options will be called second. - optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme) - if err != nil { - return nil, nil, err - } - inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme) - if err != nil { - return nil, nil, err - } - - for gvk, inheritedTransform := range inheritedTransformByGVK { - if optionsTransformByGVK == nil { - optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{} - } - optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk]) - } - return convertToByObject(optionsTransformByGVK, scheme) -} +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + logger := log.WithName("setup") -func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc { - if inherited == nil { - return current - } - if current == nil { - return inherited - } - return func(in interface{}) (interface{}, error) { - mid, err := inherited(in) + // Use the rest HTTP client for the provided config if unset + if opts.HTTPClient == nil { + var err error + opts.HTTPClient, err = rest.HTTPClientFor(config) if err != nil { - return nil, err + logger.Error(err, "Failed to get HTTP client") + return opts, fmt.Errorf("could not create HTTP client from config: %w", err) } - return current(mid) } -} -func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { opts.Scheme = scheme.Scheme @@ -410,108 +256,38 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) if err != nil { - log.WithName("setup").Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config") + logger.Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) } } // Default the resync period to 10 hours if unset - if opts.Resync == nil { - opts.Resync = &defaultResyncTime + if opts.SyncPeriod == nil { + opts.SyncPeriod = &defaultSyncPeriod } return opts, nil } -func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) { - byGVK := map[schema.GroupVersionKind]T{} - for object, value := range byObject { +func 
convertToInformerOptsByGVK(in map[client.Object]ByObject, scheme *runtime.Scheme) (map[schema.GroupVersionKind]internal.InformersOptsByGVK, error) { + out := map[schema.GroupVersionKind]internal.InformersOptsByGVK{} + for object, byObject := range in { gvk, err := apiutil.GVKForObject(object, scheme) if err != nil { return nil, err } - byGVK[gvk] = value - } - byGVK[schema.GroupVersionKind{}] = def - return byGVK, nil -} - -func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) { - var byObject map[client.Object]T - def := byGVK[schema.GroupVersionKind{}] - for gvk, value := range byGVK { - if gvk == (schema.GroupVersionKind{}) { - continue + if _, ok := out[gvk]; ok { + return nil, fmt.Errorf("duplicate cache options for GVK %v, cache.Options.ByObject has multiple types with the same GroupVersionKind", gvk) } - obj, err := scheme.New(gvk) - if err != nil { - return nil, def, err + out[gvk] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Field: byObject.Field, + Label: byObject.Label, + }, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, } - cObj, ok := obj.(client.Object) - if !ok { - return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - cObj.GetObjectKind().SetGroupVersionKind(gvk) - if byObject == nil { - byObject = map[client.Object]T{} - } - byObject[cObj] = value } - return byObject, def, nil + return out, nil } - -// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. -type DisableDeepCopyByObject map[client.Object]bool - -var _ client.Object = &ObjectAll{} - -// ObjectAll is the argument to represent all objects' types. -type ObjectAll struct { - client.Object -} - -func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { - disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} - for obj, disable := range disableDeepCopyByObject { - switch obj.(type) { - case ObjectAll, *ObjectAll: - disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable - default: - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return nil, err - } - disableDeepCopyByGVK[gvk] = disable - } - } - return disableDeepCopyByGVK, nil -} - -func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - var byObject DisableDeepCopyByObject - for gvk, value := range byGVK { - if byObject == nil { - byObject = DisableDeepCopyByObject{} - } - if gvk == (schema.GroupVersionKind{}) { - byObject[ObjectAll{}] = value - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - - byObject[cObj] = value - } - return byObject, nil -} - -// TransformByObject associate a client.Object's GVK to a transformer function -// to be applied when storing the object into the cache. 
-type TransformByObject map[client.Object]toolscache.TransformFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 08e4e6df5..771244d52 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -19,10 +19,10 @@ package cache import ( "context" "fmt" - "reflect" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -45,19 +45,21 @@ func (*ErrCacheNotStarted) Error() string { return "the cache is not started, can not read objects" } -// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +// informerCache is a Kubernetes Object cache populated from internal.Informers. +// informerCache wraps internal.Informers. type informerCache struct { - *internal.InformersMap + scheme *runtime.Scheme + *internal.Informers } // Get implements Reader. -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ic.scheme) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + started, cache, err := ic.Informers.Get(ctx, gvk, out) if err != nil { return err } @@ -69,13 +71,13 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie } // List implements Reader. -func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) +func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ic.objectTypeForListObject(out) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + started, cache, err := ic.Informers.Get(ctx, *gvk, cacheTypeObj) if err != nil { return err } @@ -90,54 +92,46 @@ func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts . // objectTypeForListObject tries to find the runtime.Object and associated GVK // for a single object corresponding to the passed-in list type. We need them // because they are used as cache map key. -func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { - gvk, err := apiutil.GVKForObject(list, ip.Scheme) +func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ic.scheme) if err != nil { return nil, nil, err } - // we need the non-list GVK, so chop off the "List" from the end of the kind - if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + // We need the non-list GVK, so chop off the "List" from the end of the kind. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - _, isUnstructured := list.(*unstructured.UnstructuredList) - var cacheTypeObj runtime.Object - if isUnstructured { + // Handle unstructured.UnstructuredList. 
+ if _, isUnstructured := list.(runtime.Unstructured); isUnstructured { u := &unstructured.Unstructured{} u.SetGroupVersionKind(gvk) - cacheTypeObj = u - } else { - itemsPtr, err := apimeta.GetItemsPtr(list) - if err != nil { - return nil, nil, err - } - // http://knowyourmeme.com/memes/this-is-fine - elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() - if elemType.Kind() != reflect.Ptr { - elemType = reflect.PtrTo(elemType) - } - - cacheTypeValue := reflect.Zero(elemType) - var ok bool - cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) - if !ok { - return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) - } + return &gvk, u, nil + } + // Handle metav1.PartialObjectMetadataList. + if _, isPartialObjectMetadata := list.(*metav1.PartialObjectMetadataList); isPartialObjectMetadata { + pom := &metav1.PartialObjectMetadata{} + pom.SetGroupVersionKind(gvk) + return &gvk, pom, nil } + // Any other list type should have a corresponding non-list type registered + // in the scheme. Use that to create a new instance of the non-list type. + cacheTypeObj, err := ic.scheme.New(gvk) + if err != nil { + return nil, nil, err + } return &gvk, cacheTypeObj, nil } // GetInformerForKind returns the informer for the GroupVersionKind. -func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { +func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object - obj, err := ip.Scheme.New(gvk) + obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -145,13 +139,13 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou } // GetInformer returns the informer for the obj. -func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - gvk, err := apiutil.GVKForObject(obj, ip.Scheme) +func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -160,7 +154,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In // NeedLeaderElection implements the LeaderElectionRunnable interface // to indicate that this can be started without requiring the leader lock. -func (ip *informerCache) NeedLeaderElection() bool { +func (ic *informerCache) NeedLeaderElection() bool { return false } @@ -169,8 +163,8 @@ func (ip *informerCache) NeedLeaderElection() bool { // to List. For one-to-one compatibility with "normal" field selectors, only return one value. // The values may be anything. They will automatically be prefixed with the namespace of the // given object, if present. The objects passed are guaranteed to be objects of the correct type. 
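objectTypeForListObject above now special-cases runtime.Unstructured and metav1.PartialObjectMetadataList, so metadata-only reads stay on the metadata informer instead of spinning up a structured cache for the concrete type. A short sketch of a metadata-only List against a cache.Cache; the ConfigMap kind and "default" namespace are placeholders.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listConfigMapMetadata lists only object metadata; the GVK set on the list
// routes the request to the metadata informer.
func listConfigMapMetadata(ctx context.Context, c cache.Cache) ([]string, error) {
	cms := &metav1.PartialObjectMetadataList{}
	cms.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMapList"})
	if err := c.List(ctx, cms, client.InNamespace("default")); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(cms.Items))
	for i := range cms.Items {
		names = append(names, cms.Items[i].Name)
	}
	return names, nil
}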
-func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - informer, err := ip.GetInformer(ctx, obj) +func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ic.GetInformer(ctx, obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index f78b08338..3c8355bbd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" ) // CacheReader is a client.Reader. @@ -147,7 +147,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli } obj, isObj := item.(runtime.Object) if !isObj { - return fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", item) } meta, err := apimeta.Accessor(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go deleted file mode 100644 index 27f46e327..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type InformersMap struct { - // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps - // TODO(directxman12): genericize this over different projections now that we have 3 different maps - - structured *specificInformersMap - unstructured *specificInformersMap - metadata *specificInformersMap - - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme -} - -// NewInformersMap creates a new InformersMap that can create informers for -// both structured and unstructured objects. 
-func NewInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, -) *InformersMap { - return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - - Scheme: scheme, - } -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -func (m *InformersMap) Start(ctx context.Context) error { - go m.structured.Start(ctx) - go m.unstructured.Start(ctx) - go m.metadata.Start(ctx) - <-ctx.Done() - return nil -} - -// WaitForCacheSync waits until all the caches have been started and synced. -func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { - syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) - - if !m.structured.waitForStarted(ctx) { - return false - } - if !m.unstructured.waitForStarted(ctx) { - return false - } - if !m.metadata.waitForStarted(ctx) { - return false - } - return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) -} - -// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns -// the Informer from the map. -func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - switch obj.(type) { - case *unstructured.Unstructured: - return m.unstructured.Get(ctx, gvk, obj) - case *unstructured.UnstructuredList: - return m.unstructured.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadata: - return m.metadata.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadataList: - return m.metadata.Get(ctx, gvk, obj) - default: - return m.structured.Get(ctx, gvk, obj) - } -} - -// newStructuredInformersMap creates a new InformersMap for structured objects. -func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) -} - -// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. -func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) -} - -// newMetadataInformersMap creates a new InformersMap for metadata-only objects. 
-func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go deleted file mode 100644 index 54bd7eec9..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import "k8s.io/apimachinery/pkg/runtime/schema" - -// GroupVersionKindAll is the argument to represent all GroupVersionKind types. -var GroupVersionKindAll = schema.GroupVersionKind{} - -// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. -type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool - -// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. -func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { - if d, ok := disableByGVK[gvk]; ok { - return d - } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { - return d - } - return false -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go new file mode 100644 index 000000000..09e011111 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -0,0 +1,560 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// InformersOpts configures an InformerMap. 
+type InformersOpts struct { + HTTPClient *http.Client + Scheme *runtime.Scheme + Mapper meta.RESTMapper + ResyncPeriod time.Duration + Namespace string + ByGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +// InformersOptsByGVK configured additional by group version kind (or object) +// in an InformerMap. +type InformersOptsByGVK struct { + Selector Selector + Transform cache.TransformFunc + UnsafeDisableDeepCopy *bool +} + +// NewInformers creates a new InformersMap that can create informers under the hood. +func NewInformers(config *rest.Config, options *InformersOpts) *Informers { + return &Informers{ + config: config, + httpClient: options.HTTPClient, + scheme: options.Scheme, + mapper: options.Mapper, + tracker: tracker{ + Structured: make(map[schema.GroupVersionKind]*Cache), + Unstructured: make(map[schema.GroupVersionKind]*Cache), + Metadata: make(map[schema.GroupVersionKind]*Cache), + }, + codecs: serializer.NewCodecFactory(options.Scheme), + paramCodec: runtime.NewParameterCodec(options.Scheme), + resync: options.ResyncPeriod, + startWait: make(chan struct{}), + namespace: options.Namespace, + byGVK: options.ByGVK, + } +} + +// Cache contains the cached data for an Cache. +type Cache struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +type tracker struct { + Structured map[schema.GroupVersionKind]*Cache + Unstructured map[schema.GroupVersionKind]*Cache + Metadata map[schema.GroupVersionKind]*Cache +} + +// Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type Informers struct { + // httpClient is used to create a new REST client + httpClient *http.Client + + // scheme maps runtime.Objects to GroupVersionKinds + scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // tracker tracks informers keyed by their type and groupVersionKind + tracker tracker + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // started is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. 
+ startWait chan struct{} + + // waitGroup is the wait group that is used to wait for all informers to stop + waitGroup sync.WaitGroup + + // stopped is true if the informers have been stopped + stopped bool + + // ctx is the context to stop informers + ctx context.Context + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + byGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +func (ip *Informers) getSelector(gvk schema.GroupVersionKind) Selector { + if ip.byGVK == nil { + return Selector{} + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Selector + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Selector + } + return Selector{} +} + +func (ip *Informers) getTransform(gvk schema.GroupVersionKind) cache.TransformFunc { + if ip.byGVK == nil { + return nil + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Transform + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Transform + } + return nil +} + +func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { + if ip.byGVK == nil { + return false + } + if res, ok := ip.byGVK[gvk]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + return false +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *Informers) Start(ctx context.Context) error { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the context so it can be passed to informers that are added later + ip.ctx = ctx + + // Start each informer + for _, i := range ip.tracker.Structured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Unstructured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Metadata { + ip.startInformerLocked(i.Informer) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() // Block until the context is done + ip.mu.Lock() + ip.stopped = true // Set stopped to true so we don't start any new informers + ip.mu.Unlock() + ip.waitGroup.Wait() // Block until all informers have stopped + return nil +} + +func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { + // Don't start the informer in case we are already waiting for the items in + // the waitGroup to finish, since waitGroups don't support waiting and adding + // at the same time. + if ip.stopped { + return + } + + ip.waitGroup.Add(1) + go func() { + defer ip.waitGroup.Done() + informer.Run(ip.ctx.Done()) + }() +} + +func (ip *Informers) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// getHasSyncedFuncs returns all the HasSynced functions for the informers in this map. 
+func (ip *Informers) getHasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + + res := make([]cache.InformerSynced, 0, + len(ip.tracker.Structured)+len(ip.tracker.Unstructured)+len(ip.tracker.Metadata), + ) + for _, i := range ip.tracker.Structured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Unstructured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Metadata { + res = append(res, i.Informer.HasSynced) + } + return res +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { + if !ip.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) +} + +func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByType(obj)[gvk] + return i, ip.started, ok +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { + // Return the informer if it is found + i, started, ok := ip.get(gvk, obj) + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { + switch obj.(type) { + case runtime.Unstructured: + return ip.tracker.Unstructured + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + return ip.tracker.Metadata + default: + return ip.tracker.Structured + } +} + +func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByType(obj)[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ listWatcher, err := ip.makeListWatcher(gvk, obj) + if err != nil { + return nil, false, err + } + sharedIndexInformer := cache.NewSharedIndexInformer(&cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.getSelector(gvk).ApplyToList(&opts) + return listWatcher.ListFunc(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.getSelector(gvk).ApplyToList(&opts) + opts.Watch = true // Watch needs to be set to true separately + return listWatcher.WatchFunc(opts) + }, + }, obj, calculateResyncPeriod(ip.resync), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := sharedIndexInformer.SetTransform(ip.getTransform(gvk)); err != nil { + return nil, false, err + } + + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + // Create the new entry and set it in the map. + i := &Cache{ + Informer: sharedIndexInformer, + Reader: CacheReader{ + indexer: sharedIndexInformer.GetIndexer(), + groupVersionKind: gvk, + scopeName: mapping.Scope.Name(), + disableDeepCopy: ip.getDisableDeepCopy(gvk), + }, + } + ip.informersByType(obj)[gvk] = i + + // Start the informer in case the InformersMap has started, otherwise it will be + // started when the InformersMap starts. + if ip.started { + ip.startInformerLocked(i.Informer) + } + return i, ip.started, nil +} + +func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Object) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Figure out if the GVK we're dealing with is global, or namespace scoped. + var namespace string + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace = restrictNamespaceBySelector(ip.namespace, ip.getSelector(gvk)) + } + + switch obj.(type) { + // + // Unstructured + // + case runtime.Unstructured: + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := dynamicClient.Resource(mapping.Resource) + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if namespace != "" { + return resources.Namespace(namespace).List(ip.ctx, opts) + } + return resources.List(ip.ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + if namespace != "" { + return resources.Namespace(namespace).Watch(ip.ctx, opts) + } + return resources.Watch(ip.ctx, opts) + }, + }, nil + // + // Metadata + // + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // Grab the metadata metadataClient. 
+ metadataClient, err := metadata.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := metadataClient.Resource(mapping.Resource) + + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + var ( + list *metav1.PartialObjectMetadataList + err error + ) + if namespace != "" { + list, err = resources.Namespace(namespace).List(ip.ctx, opts) + } else { + list, err = resources.List(ip.ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watcher watch.Interface, err error) { + if namespace != "" { + watcher, err = resources.Namespace(namespace).Watch(ip.ctx, opts) + } else { + watcher, err = resources.Watch(ip.ctx, opts) + } + if err != nil { + return nil, err + } + return newGVKFixupWatcher(gvk, watcher), nil + }, + }, nil + // + // Structured. + // + default: + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs, ip.httpClient) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.scheme.New(listGVK) + if err != nil { + return nil, err + } + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + + // Create the resulting object, and execute the request. + res := listObj.DeepCopyObject() + if err := req.Do(ip.ctx).Into(res); err != nil { + return nil, err + } + return res, nil + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + // Call the watch. + return req.Watch(ip.ctx) + }, + }, nil + } +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// calculateResyncPeriod returns a duration based on the desired input +// this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. 
+func calculateResyncPeriod(resync time.Duration) time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go deleted file mode 100644 index 1524d2316..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/metadata" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// clientListWatcherFunc knows how to create a ListWatcher. -type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) - -// newSpecificInformersMap returns a new specificInformersMap (like -// the generical InformersMap, except that it doesn't implement WaitForCacheSync). 
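The two helpers added above introduce relist jitter and derive an implicit namespace restriction from a metadata.namespace field selector. A minimal, self-contained sketch of that same arithmetic and selector check, with made-up inputs (a 10h resync and a "team-a" namespace); this is illustrative only, not part of the vendored patch:

package main

import (
	"fmt"
	"math/rand"
	"time"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Jitter: the effective resync period falls in [0.9, 1.1) of the requested one,
	// so multiple informers do not relist in lock-step.
	resync := 10 * time.Hour
	factor := rand.Float64()/5.0 + 0.9
	fmt.Println("effective resync:", time.Duration(float64(resync.Nanoseconds())*factor))

	// A field selector on metadata.namespace narrows the ListWatch to that
	// namespace, mirroring restrictNamespaceBySelector above.
	sel := fields.OneTermEqualSelector("metadata.namespace", "team-a")
	if ns, found := sel.RequiresExactMatch("metadata.namespace"); found {
		fmt.Println("ListWatch restricted to namespace:", ns)
	}
}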
-func newSpecificInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, - createListWatcher createListWatcherFunc, -) *specificInformersMap { - ip := &specificInformersMap{ - config: config, - Scheme: scheme, - mapper: mapper, - informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), - codecs: serializer.NewCodecFactory(scheme), - paramCodec: runtime.NewParameterCodec(scheme), - resync: resync, - startWait: make(chan struct{}), - createListWatcher: createListWatcher, - namespace: namespace, - selectors: selectors.forGVK, - disableDeepCopy: disableDeepCopy, - transformers: transformers, - } - return ip -} - -// MapEntry contains the cached data for an Informer. -type MapEntry struct { - // Informer is the cached informer - Informer cache.SharedIndexInformer - - // CacheReader wraps Informer and implements the CacheReader interface for a single type - Reader CacheReader -} - -// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type specificInformersMap struct { - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme - - // config is used to talk to the apiserver - config *rest.Config - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // informersByGVK is the cache of informers keyed by groupVersionKind - informersByGVK map[schema.GroupVersionKind]*MapEntry - - // codecs is used to create a new REST client - codecs serializer.CodecFactory - - // paramCodec is used by list and watch - paramCodec runtime.ParameterCodec - - // stop is the stop channel to stop informers - stop <-chan struct{} - - // resync is the base frequency the informers are resynced - // a 10 percent jitter will be added to the resync period between informers - // so that all informers will not send list requests simultaneously. - resync time.Duration - - // mu guards access to the map - mu sync.RWMutex - - // start is true if the informers have been started - started bool - - // startWait is a channel that is closed after the - // informer has been started. - startWait chan struct{} - - // createClient knows how to create a client and a list object, - // and allows for abstracting over the particulars of structured vs - // unstructured objects. - createListWatcher createListWatcherFunc - - // namespace is the namespace that all ListWatches are restricted to - // default or empty string means all namespaces - namespace string - - // selectors are the label or field selectors that will be added to the - // ListWatch ListOptions. - selectors func(gvk schema.GroupVersionKind) Selector - - // disableDeepCopy indicates not to deep copy objects during get or list objects. - disableDeepCopy DisableDeepCopyByGVK - - // transform funcs are applied to objects before they are committed to the cache - transformers TransformFuncByObject -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -// It doesn't return start because it can't return an error, and it's not a runnable directly. 
-func (ip *specificInformersMap) Start(ctx context.Context) { - func() { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Set the stop channel so it can be passed to informers that are added later - ip.stop = ctx.Done() - - // Start each informer - for _, informer := range ip.informersByGVK { - go informer.Informer.Run(ctx.Done()) - } - - // Set started to true so we immediately start any informers added later. - ip.started = true - close(ip.startWait) - }() - <-ctx.Done() -} - -func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { - select { - case <-ip.startWait: - return true - case <-ctx.Done(): - return false - } -} - -// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. -func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { - ip.mu.RLock() - defer ip.mu.RUnlock() - syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) - for _, informer := range ip.informersByGVK { - syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) - } - return syncedFuncs -} - -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns -// the Informer from the map. -func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - // Return the informer if it is found - i, started, ok := func() (*MapEntry, bool, bool) { - ip.mu.RLock() - defer ip.mu.RUnlock() - i, ok := ip.informersByGVK[gvk] - return i, ip.started, ok - }() - - if !ok { - var err error - if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return started, nil, err - } - } - - if started && !i.Informer.HasSynced() { - // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. - if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { - return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) - } - } - - return started, i, nil -} - -func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - if i, ok := ip.informersByGVK[gvk]; ok { - return i, ip.started, nil - } - - // Create a NewSharedIndexInformer and add it to the map. - var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { - return nil, false, err - } - ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - - // Check to see if there is a transformer for this gvk - if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { - return nil, false, err - } - - rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, false, err - } - - i := &MapEntry{ - Informer: ni, - Reader: CacheReader{ - indexer: ni.GetIndexer(), - groupVersionKind: gvk, - scopeName: rm.Scope.Name(), - disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), - }, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? 
- if ip.started { - go i.Informer.Run(ip.stop) - } - return i, ip.started, nil -} - -// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. -func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) - if err != nil { - return nil, err - } - listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") - listObj, err := ip.Scheme.New(listGVK) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - res := listObj.DeepCopyObject() - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) - return res, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) - }, - }, nil -} - -func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // If the rest configuration has a negotiated serializer passed in, - // we should remove it and use the one that the dynamic client sets for us. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. 
- ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).List(ctx, opts) - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) - }, - }, nil -} - -func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // Always clear the negotiated serializer and use the one - // set from the metadata client. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - - // grab the metadata client - client, err := metadata.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - - // create the relevant listwatch - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - - var ( - list *metav1.PartialObjectMetadataList - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } else { - list, err = client.Resource(mapping.Resource).List(ctx, opts) - } - if list != nil { - for i := range list.Items { - list.Items[i].SetGroupVersionKind(gvk) - } - } - return list, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - - var ( - watcher watch.Interface - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } else { - watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) - } - if watcher != nil { - watcher = newGVKFixupWatcher(gvk, watcher) - } - return watcher, err - }, - }, nil -} - -// newGVKFixupWatcher adds a wrapper that preserves the GVK information when -// events come in. -// -// This works around a bug where GVK information is not passed into mapping -// functions when using the OnlyMetadata option in the builder. -// This issue is most likely caused by kubernetes/kubernetes#80609. 
-// See kubernetes-sigs/controller-runtime#1484. -// -// This was originally implemented as a cache.ResourceEventHandler wrapper but -// that contained a data race which was resolved by setting the GVK in a watch -// wrapper, before the objects are written to the cache. -// See kubernetes-sigs/controller-runtime#1650. -// -// The original watch wrapper was found to be incompatible with -// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a -// watch.Filter which is compatible. -// See kubernetes-sigs/controller-runtime#1789. -func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { - return watch.Filter( - watcher, - func(in watch.Event) (watch.Event, bool) { - in.Object.GetObjectKind().SetGroupVersionKind(gvk) - return in, true - }, - ) -} - -// resyncPeriod returns a function which generates a duration each time it is -// invoked; this is so that multiple controllers don't get into lock-step and all -// hammer the apiserver with list requests simultaneously. -func resyncPeriod(resync time.Duration) func() time.Duration { - return func() time.Duration { - // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 //nolint:gosec - return time.Duration(float64(resync.Nanoseconds()) * factor) - } -} - -// restrictNamespaceBySelector returns either a global restriction for all ListWatches -// if not default/empty, or the namespace that a ListWatch for the specific resource -// is restricted to, based on a specified field selector for metadata.namespace field. -func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { - if namespaceOpt != "" { - // namespace is already restricted - return namespaceOpt - } - fieldSelector := s.Field - if fieldSelector == nil || fieldSelector.Empty() { - return "" - } - // check whether a selector includes the namespace field - value, found := fieldSelector.RequiresExactMatch("metadata.namespace") - if found { - return value - } - return "" -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 4eff32fb3..c674379b9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -20,23 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector. -type SelectorsByGVK map[schema.GroupVersionKind]Selector - -func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { - if specific, found := s[gvk]; found { - return specific - } - if defaultSelector, found := s[schema.GroupVersionKind{}]; found { - return defaultSelector - } - - return Selector{} -} - // Selector specify the label/field selector to fill in ListOptions. 
type Selector struct { Label labels.Selector diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go index f69e02262..0725f550c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -8,9 +8,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// TransformFuncByObject provides access to the correct transform function for +// TransformFuncByGVK provides access to the correct transform function for // any given GVK. -type TransformFuncByObject interface { +type TransformFuncByGVK interface { Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error Get(schema.GroupVersionKind) cache.TransformFunc SetDefault(transformer cache.TransformFunc) @@ -21,9 +21,9 @@ type transformFuncByGVK struct { transformers map[schema.GroupVersionKind]cache.TransformFunc } -// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that +// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that // maps GVKs to TransformFuncs. -func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject { +func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { byGVK := &transformFuncByGVK{} if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { byGVK.defaultTransform = defaultFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index fccb36471..ac97beae9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -28,12 +28,9 @@ import ( "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config. -type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) - // a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" @@ -43,31 +40,43 @@ const globalCache = "_cluster-scope" // a global cache for cluster scoped resource. Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. +// +// Deprecated: Use cache.Options.Namespaces instead. func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { return func(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err - } + opts.Namespaces = namespaces + return newMultiNamespaceCache(config, opts) + } +} - caches := map[string]Cache{} +func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { + if len(opts.Namespaces) < 2 { + return nil, fmt.Errorf("must specify more than one namespace to use multi-namespace cache") + } + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } - // create a cache for cluster scoped resources - gCache, err := New(config, opts) + // Create every namespace cache. 
+ caches := map[string]Cache{} + for _, ns := range opts.Namespaces { + opts.Namespaces = []string{ns} + c, err := New(config, opts) if err != nil { - return nil, fmt.Errorf("error creating global cache: %w", err) + return nil, err } + caches[ns] = c + } - for _, ns := range namespaces { - opts.Namespace = ns - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c - } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + // Create a cache for cluster scoped resources. + opts.Namespaces = []string{} + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) } + + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -89,7 +98,7 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } @@ -119,7 +128,7 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } @@ -183,9 +192,9 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { } func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil //nolint:nilerr + return err } if !isNamespaced { @@ -201,7 +210,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, } func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -223,7 +232,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(list, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -293,42 +302,63 @@ type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } +type handlerRegistration struct { + handles map[string]toolscache.ResourceEventHandlerRegistration +} + +type syncer interface { + HasSynced() bool +} + +// HasSynced asserts that the handler has been called for the full initial state of the informer. +// This uses syncer to be compatible between client-go 1.27+ and older versions when the interface changed. 
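As a usage sketch for the aggregated registration handle defined here: with client-go v0.27-style registrations a caller can wait on the handle's HasSynced before relying on its event handler. The helper below is illustrative (function and variable names are not from the patch) and assumes a controller-runtime cache.Cache:

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// waitForPodHandler registers a handler on the Pod informer and blocks until the
// handler has been called for the cache's initial contents.
func waitForPodHandler(ctx context.Context, c cache.Cache) error {
	informer, err := c.GetInformer(ctx, &corev1.Pod{})
	if err != nil {
		return err
	}
	reg, err := informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to pods */ },
	})
	if err != nil {
		return err
	}
	// reg.HasSynced aggregates the per-namespace registrations shown above.
	if !toolscache.WaitForCacheSync(ctx.Done(), reg.HasSynced) {
		return fmt.Errorf("timed out waiting for event handler to sync")
	}
	return nil
}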
+func (h handlerRegistration) HasSynced() bool { + for _, reg := range h.handles { + if s, ok := reg.(syncer); ok { + if !s.HasSynced() { + return false + } + } + } + return true +} + var _ Informer = &multiNamespaceInformer{} // AddEventHandler adds the handler to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // RemoveEventHandler removes a formerly added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { - handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration) + handles, ok := h.(handlerRegistration) if !ok { return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { - registration, ok := handles[ns] + registration, ok := handles.handles[ns] if !ok { continue } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 1030013db..2b9b60d8d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -19,9 +19,14 @@ package certwatcher import ( "context" "crypto/tls" + "fmt" "sync" + "time" "github.com/fsnotify/fsnotify" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -39,6 +44,9 @@ type CertWatcher struct { certPath string keyPath string + + // callback is a function to be invoked when the certificate changes. + callback func(tls.Certificate) } // New returns a new CertWatcher watching the given certificate and key. @@ -63,6 +71,17 @@ func New(certPath, keyPath string) (*CertWatcher, error) { return cw, nil } +// RegisterCallback registers a callback to be invoked when the certificate changes. 
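A hedged sketch of how the new RegisterCallback hook can be combined with GetCertificate on an HTTPS server; the listen address, certificate paths, and function name are placeholders, not part of the vendored change:

package example

import (
	"context"
	"crypto/tls"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

// serveWithRotation serves HTTPS with certificates that are reloaded on change.
// GetCertificate hands the currently loaded pair to the TLS stack, and the
// registered callback observes every rotation (e.g. to log or re-verify it).
func serveWithRotation(ctx context.Context, certPath, keyPath string) error {
	cw, err := certwatcher.New(certPath, keyPath)
	if err != nil {
		return err
	}
	cw.RegisterCallback(func(tls.Certificate) {
		// Invoked whenever the watched certificate is reloaded.
	})
	go func() { _ = cw.Start(ctx) }() // watch the files until ctx is cancelled

	srv := &http.Server{
		Addr:      ":8443", // placeholder address
		TLSConfig: &tls.Config{GetCertificate: cw.GetCertificate},
	}
	return srv.ListenAndServeTLS("", "")
}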
+func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { + cw.Lock() + defer cw.Unlock() + // If the current certificate is not nil, invoke the callback immediately. + if cw.currentCert != nil { + callback(*cw.currentCert) + } + cw.callback = callback +} + // GetCertificate fetches the currently loaded certificate, which may be nil. func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { cw.RLock() @@ -72,11 +91,22 @@ func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, // Start starts the watch on the certificate and key files. func (cw *CertWatcher) Start(ctx context.Context) error { - files := []string{cw.certPath, cw.keyPath} - - for _, f := range files { - if err := cw.watcher.Add(f); err != nil { - return err + files := sets.New(cw.certPath, cw.keyPath) + + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for _, f := range files.UnsortedList() { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. + } + // We've added the watch, remove it from the set. + files.Delete(f) + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to add watches: %w", kerrors.NewAggregate([]error{err, watchErr})) } } @@ -130,6 +160,14 @@ func (cw *CertWatcher) ReadCertificate() error { log.Info("Updated current TLS certificate") + // If a callback is registered, invoke it with the new certificate. + cw.RLock() + defer cw.RUnlock() + if cw.callback != nil { + go func() { + cw.callback(cert) + }() + } return nil } @@ -154,13 +192,13 @@ func (cw *CertWatcher) handleEvent(event fsnotify.Event) { } func isWrite(event fsnotify.Event) bool { - return event.Op&fsnotify.Write == fsnotify.Write + return event.Op.Has(fsnotify.Write) } func isCreate(event fsnotify.Event) bool { - return event.Op&fsnotify.Create == fsnotify.Create + return event.Op.Has(fsnotify.Create) } func isRemove(event fsnotify.Event) bool { - return event.Op&fsnotify.Remove == fsnotify.Remove + return event.Op.Has(fsnotify.Remove) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 8e2ac48fa..6a1bfb546 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -20,7 +20,9 @@ limitations under the License. package apiutil import ( + "errors" "fmt" + "net/http" "reflect" "sync" @@ -30,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" @@ -59,9 +62,13 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. 
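The apiutil constructors in the hunks that follow now take an explicit *http.Client. A minimal sketch of the intended call pattern, assuming the client is built with rest.HTTPClientFor and using the new IsObjectNamespaced helper; the wrapper function name is illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// isPodNamespaced builds a mapper with the httpClient-aware constructor and asks
// whether Pods are namespace scoped (it should report true).
func isPodNamespaced(cfg *rest.Config) (bool, error) {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return false, err
	}
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		return false, err
	}
	return apiutil.IsObjectNamespaced(&corev1.Pod{}, scheme.Scheme, mapper)
}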
-func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { +func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfig(c) + dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) if err != nil { return nil, err } @@ -72,6 +79,36 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { return restmapper.NewDiscoveryRESTMapper(gr), nil } +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. +func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { // TODO(directxman12): do we want to generalize this to arbitrary container types? @@ -142,21 +179,11 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. -func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) -} - -// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory -// in order to avoid clearing the GVK from the decoded object. -// -// See https://github.com/kubernetes/kubernetes/issues/80609. -type serializerWithDecodedGVK struct { - serializer.WithoutConversionCodecFactory -} - -// DecoderToVersion returns an decoder that does not do conversion. 
-func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return serializer +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) } // createRestConfig copies the base config and updates needed fields for a new rest config. @@ -183,9 +210,8 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } if isUnstructured { - // If the object is unstructured, we need to preserve the GVK information. - // Use our own custom serializer. - cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + // If the object is unstructured, we use the client-go dynamic serializer. + cfg = dynamic.ConfigFor(cfg) } else { cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go deleted file mode 100644 index 6b9dcf68a..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiutil - -import ( - "sync" - "sync/atomic" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -// dynamicRESTMapper is a RESTMapper that dynamically discovers resource -// types at runtime. -type dynamicRESTMapper struct { - mu sync.RWMutex // protects the following fields - staticMapper meta.RESTMapper - limiter *rate.Limiter - newMapper func() (meta.RESTMapper, error) - - lazy bool - // Used for lazy init. - inited uint32 - initMtx sync.Mutex - - useLazyRestmapper bool -} - -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. -type DynamicRESTMapperOption func(*dynamicRESTMapper) error - -// WithLimiter sets the RESTMapper's underlying limiter to lim. -func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.limiter = lim - return nil - } -} - -// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings -// until an API call is made. -var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.lazy = true - return nil -} - -// WithExperimentalLazyMapper enables experimental more advanced Lazy Restmapping mechanism. 
-var WithExperimentalLazyMapper DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.useLazyRestmapper = true - return nil -} - -// WithCustomMapper supports setting a custom RESTMapper refresher instead of -// the default method, which uses a discovery client. -// -// This exists mainly for testing, but can be useful if you need tighter control -// over how discovery is performed, which discovery endpoints are queried, etc. -func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.newMapper = newMapper - return nil - } -} - -// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. opts -// configure the RESTMapper. -func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { - client, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - drm := &dynamicRESTMapper{ - limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - newMapper: func() (meta.RESTMapper, error) { - groupResources, err := restmapper.GetAPIGroupResources(client) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(groupResources), nil - }, - } - for _, opt := range opts { - if err = opt(drm); err != nil { - return nil, err - } - } - if drm.useLazyRestmapper { - return newLazyRESTMapperWithClient(client) - } - if !drm.lazy { - if err := drm.setStaticMapper(); err != nil { - return nil, err - } - } - return drm, nil -} - -var ( - // defaultRefilRate is the default rate at which potential calls are - // added back to the "bucket" of allowed calls. - defaultRefillRate = 5 - // defaultLimitSize is the default starting/max number of potential calls - // per second. Once a call is used, it's added back to the bucket at a rate - // of defaultRefillRate per second. - defaultLimitSize = 5 -) - -// setStaticMapper sets drm's staticMapper by querying its client, regardless -// of reload backoff. -func (drm *dynamicRESTMapper) setStaticMapper() error { - newMapper, err := drm.newMapper() - if err != nil { - return err - } - drm.staticMapper = newMapper - return nil -} - -// init initializes drm only once if drm is lazy. -func (drm *dynamicRESTMapper) init() (err error) { - // skip init if drm is not lazy or has initialized - if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { - return nil - } - - drm.initMtx.Lock() - defer drm.initMtx.Unlock() - if drm.inited == 0 { - if err = drm.setStaticMapper(); err == nil { - atomic.StoreUint32(&drm.inited, 1) - } - } - return err -} - -// checkAndReload attempts to call the given callback, which is assumed to be dependent -// on the data in the restmapper. -// -// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload -// the RESTMapper's data and re-call the callback once that's occurred. -// If the callback returns any other error, the function will return immediately regardless. -// -// It will take care of ensuring that reloads are rate-limited and that extraneous calls -// aren't made. If a reload would exceed the limiters rate, it returns the error return by -// the callback. -// It's thread-safe, and worries about thread-safety for the callback (so the callback does -// not need to attempt to lock the restmapper). 
-func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error { - // first, check the common path -- data is fresh enough - // (use an IIFE for the lock's defer) - err := func() error { - drm.mu.RLock() - defer drm.mu.RUnlock() - - return checkNeedsReload() - }() - - needsReload := meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // if the data wasn't fresh, we'll need to try and update it, so grab the lock... - drm.mu.Lock() - defer drm.mu.Unlock() - - // ... and double-check that we didn't reload in the meantime - err = checkNeedsReload() - needsReload = meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // we're still stale, so grab a rate-limit token if we can... - if !drm.limiter.Allow() { - // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) - // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError - return err - } - - // ...reload... - if err := drm.setStaticMapper(); err != nil { - return err - } - - // ...and return the results of the closure regardless - return checkNeedsReload() -} - -// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. - -func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionKind{}, err - } - var gvk schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvk, err = drm.staticMapper.KindFor(resource) - return err - }) - return gvk, err -} - -func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvks []schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvks, err = drm.staticMapper.KindsFor(resource) - return err - }) - return gvks, err -} - -func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionResource{}, err - } - - var gvr schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvr, err = drm.staticMapper.ResourceFor(input) - return err - }) - return gvr, err -} - -func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvrs []schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvrs, err = drm.staticMapper.ResourcesFor(input) - return err - }) - return gvrs, err -} - -func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mapping *meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mapping, err = drm.staticMapper.RESTMapping(gk, versions...) - return err - }) - return mapping, err -} - -func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mappings []*meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
- return err - }) - return mappings, err -} - -func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { - if err := drm.init(); err != nil { - return "", err - } - var singular string - err := drm.checkAndReload(func() error { - var err error - singular, err = drm.staticMapper.ResourceSingularizer(resource) - return err - }) - return singular, err -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go similarity index 59% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index e9b1e710c..f14f8a9f5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -18,137 +18,145 @@ package apiutil import ( "fmt" + "net/http" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" ) -// lazyRESTMapper is a RESTMapper that will lazily query the provided +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided // client for discovery information to do REST mappings. -type lazyRESTMapper struct { +type mapper struct { mapper meta.RESTMapper client *discovery.DiscoveryClient knownGroups map[string]*restmapper.APIGroupResources - apiGroups []metav1.APIGroup + apiGroups map[string]*metav1.APIGroup // mutex to provide thread-safe mapper reloading. - mu sync.Mutex -} - -// newLazyRESTMapperWithClient initializes a LazyRESTMapper with a custom discovery client. -func newLazyRESTMapperWithClient(discoveryClient *discovery.DiscoveryClient) (meta.RESTMapper, error) { - return &lazyRESTMapper{ - mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: discoveryClient, - knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: []metav1.APIGroup{}, - }, nil + mu sync.RWMutex } // KindFor implements Mapper.KindFor. 
-func (m *lazyRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.mapper.KindFor(resource) +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err } - - res, err = m.mapper.KindFor(resource) + res, err = m.getMapper().KindFor(resource) } return res, err } // KindsFor implements Mapper.KindsFor. -func (m *lazyRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.mapper.KindsFor(resource) +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err } - - res, err = m.mapper.KindsFor(resource) + res, err = m.getMapper().KindsFor(resource) } return res, err } // ResourceFor implements Mapper.ResourceFor. -func (m *lazyRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.mapper.ResourceFor(input) +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err } - - res, err = m.mapper.ResourceFor(input) + res, err = m.getMapper().ResourceFor(input) } return res, err } // ResourcesFor implements Mapper.ResourcesFor. -func (m *lazyRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.mapper.ResourcesFor(input) +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err } - - res, err = m.mapper.ResourcesFor(input) + res, err = m.getMapper().ResourcesFor(input) } return res, err } // RESTMapping implements Mapper.RESTMapping. -func (m *lazyRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.mapper.RESTMapping(gk, versions...) +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMapping(gk, versions...) + res, err = m.getMapper().RESTMapping(gk, versions...) } return res, err } // RESTMappings implements Mapper.RESTMappings. 
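A short caller-side sketch, assuming a mapper built as above: the first lookup for an unknown group triggers discovery for just that group, and a no-match error that survives the reload can be treated as "kind not installed". The function name is illustrative:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// deploymentMapping resolves the REST mapping for apps/v1 Deployments.
func deploymentMapping(mapper meta.RESTMapper) (*meta.RESTMapping, error) {
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if meta.IsNoMatchError(err) {
		// The API server does not expose this group/kind; handle as "not installed".
		return nil, err
	}
	return mapping, err
}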
-func (m *lazyRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.mapper.RESTMappings(gk, versions...) +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMappings(gk, versions...) + res, err = m.getMapper().RESTMappings(gk, versions...) } return res, err } // ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *lazyRESTMapper) ResourceSingularizer(resource string) (string, error) { - return m.mapper.ResourceSingularizer(resource) +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper } // addKnownGroupAndReload reloads the mapper with updated information about missing API group. // versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...string) error { - m.mu.Lock() - defer m.mu.Unlock() - +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByNameLocked(groupName) + apiGroup, err := m.findAPIGroupByName(groupName) if err != nil { return err } @@ -157,6 +165,9 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } } + m.mu.Lock() + defer m.mu.Unlock() + // Create or fetch group resources from cache. groupResources := &restmapper.APIGroupResources{ Group: metav1.APIGroup{Name: groupName}, @@ -205,43 +216,53 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil } // findAPIGroupByNameLocked returns API group by its name. -func (m *lazyRESTMapper) findAPIGroupByNameLocked(groupName string) (metav1.APIGroup, error) { +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { // Looking in the cache first. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // Update the cache if nothing was found. apiGroups, err := m.client.ServerGroups() if err != nil { - return metav1.APIGroup{}, fmt.Errorf("failed to get server groups: %w", err) + return nil, fmt.Errorf("failed to get server groups: %w", err) } if len(apiGroups.Groups) == 0 { - return metav1.APIGroup{}, fmt.Errorf("received an empty API groups list") + return nil, fmt.Errorf("received an empty API groups list") } - m.apiGroups = apiGroups.Groups + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() // Looking in the cache again. 
- for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // If there is still nothing, return an error. - return metav1.APIGroup{}, fmt.Errorf("failed to find API group %s", groupName) + return nil, fmt.Errorf("failed to find API group %q", groupName) } // fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *lazyRESTMapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 7d1ed5c96..21067b6f8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "net/http" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -36,6 +36,28 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Options are creation options for a Client. +type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // WarningHandler is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + WarningHandler WarningHandlerOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool +} + // WarningHandlerOptions are options for configuring a // warning handler for the client which is responsible // for surfacing API Server warnings. @@ -50,19 +72,21 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client. -type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper - - // Opts is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - Opts WarningHandlerOptions +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should not be read from the cache. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. 
+ Unstructured bool } +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. // The returned client reads *and* writes directly from the server // (it doesn't use object caches). It understands how to work with @@ -73,8 +97,12 @@ type Options struct { // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. -func New(config *rest.Config, options Options) (Client, error) { - return newClient(config, options) +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err } func newClient(config *rest.Config, options Options) (*client, error) { @@ -82,7 +110,7 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } - if !options.Opts.SuppressWarnings { + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") // Set a WarningHandler, the default WarningHandler @@ -93,11 +121,20 @@ func newClient(config *rest.Config, options Options) (*client, error) { config.WarningHandler = log.NewKubeAPIWarningLogger( logger, log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.Opts.AllowDuplicateLogs, + Deduplicate: !options.WarningHandler.AllowDuplicateLogs, }, ) } + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -106,34 +143,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfig(config) + rawMetaClient, err := metadata.NewForConfigAndClient(config, options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientcache, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - cache: clientcache, + resources: resources, paramCodec: noConversionParamCodec{}, }, metadataClient: metadataClient{ @@ -143,20 +181,65 @@ func newClient(config *rest.Config, options Options) (*client, error) { scheme: options.Scheme, mapper: options.Mapper, } + if options.Cache 
== nil || options.Cache.Reader == nil { + return c, nil + } + + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client that reads and writes directly from/to an API server. +// It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient unstructuredClient unstructuredClient metadataClient metadataClient scheme *runtime.Scheme mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool +} + +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -168,6 +251,16 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + // Scheme returns the scheme this client is using. func (c *client) Scheme() *runtime.Scheme { return c.scheme @@ -181,7 +274,7 @@ func (c *client) RESTMapper() meta.RESTMapper { // Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot create using only metadata") @@ -194,7 +287,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") @@ -206,7 +299,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e // Delete implements client.Client. 
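Reviewer note (not part of the diff): the client.go hunk above replaces the old delegating-client pattern with cache settings passed directly to client.New, and adds HTTPClient and DryRun knobs on Options. A minimal sketch of how downstream code might use the new shape, assuming cfg, scheme and cacheReader are already available (cacheReader is any client.Reader, typically a cache.Cache); the function name and the Pod exclusion are assumptions for illustration.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newCachedClient sketches the v0.15 Options shape introduced above: reads go
// through Cache.Reader except for the types listed in DisableFor, which
// replaces the delegating-client wrapper removed later in this diff.
func newCachedClient(cfg *rest.Config, scheme *runtime.Scheme, cacheReader client.Reader) (client.Client, error) {
	dryRun := false
	return client.New(cfg, client.Options{
		Scheme: scheme,
		Cache: &client.CacheOptions{
			Reader:     cacheReader,
			DisableFor: []client.Object{&corev1.Pod{}}, // example: always read Pods live from the API server
		},
		DryRun: &dryRun, // when true, New wraps the returned client with NewDryRunClient
	})
}
```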
func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Delete(ctx, obj, opts...) @@ -218,7 +311,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e // DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.DeleteAllOf(ctx, obj, opts...) @@ -231,7 +324,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Patch(ctx, obj, patch, opts...) @@ -242,8 +335,14 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat // Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.Get(ctx, key, obj, opts...) + } + switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Get(ctx, key, obj, opts...) case *metav1.PartialObjectMetadata: // Metadata only object should always preserve the GVK coming in from the caller. @@ -256,8 +355,14 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...Get // List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.List(ctx, obj, opts...) + } + switch x := obj.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: // Metadata only object should always preserve the GVK. @@ -431,7 +536,7 @@ func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOp func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return errors.New("can not get subresource using only metadata") @@ -446,7 +551,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) 
case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -459,7 +564,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -472,7 +577,7 @@ func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...Sub func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) case *metav1.PartialObjectMetadata: return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go similarity index 82% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index 857a0b38a..2d0787952 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -17,12 +17,12 @@ limitations under the License. package client import ( + "net/http" "strings" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -30,8 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types. -type clientCache struct { +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + // config is the rest.Config to talk to an apiserver config *rest.Config @@ -44,22 +47,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType caches structured type metadata + // structuredResourceByType stores structured type metadata structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType caches unstructured type metadata + // unstructuredResourceByType stores unstructured type metadata unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { +func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient) if err != nil { return nil, err } @@ -72,15 +75,13 @@ func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstruc // getResource returns the resource meta information for the given type of object. // If the object is a list, the resource represents the item's type instead. -func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { +func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return nil, err } - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList + _, isUnstructured := obj.(runtime.Unstructured) // It's better to do creation work twice than to not let multiple // people make requests at once @@ -108,7 +109,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { } // getObjMeta returns objMeta containing both type and object metadata and state. -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { +func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { return &objMeta{resourceMeta: r, Object: m}, err } -// resourceMeta caches state for a Kubernetes type. +// resourceMeta stores state for a Kubernetes type. type resourceMeta struct { // client is the rest client used to talk to the apiserver rest.Interface diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index e4e8585cb..5f0a6d4b1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -98,12 +98,12 @@ func GetConfigWithContext(context string) (*rest.Config, error) { if err != nil { return nil, err } - if cfg.QPS == 0.0 { cfg.QPS = 20.0 - cfg.Burst = 30.0 } - + if cfg.Burst == 0 { + cfg.Burst = 30 + } return cfg, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index e0e288509..b2e202494 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -26,8 +26,7 @@ limitations under the License. // to the API server. // // It is a common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// server. This pattern is covered by the creating the Client with a Cache. 
// // # Options // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 73b56429e..bbcdd3832 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewDryRunClient wraps an existing client and enforces DryRun mode @@ -46,6 +47,16 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index 4da642319..7167c5505 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -17,15 +17,25 @@ limitations under the License. package fake import ( + "bytes" "context" "encoding/json" "errors" "fmt" "reflect" + "runtime/debug" "strconv" "strings" "sync" + "time" + // Using v4 to match upstream + jsonpatch "github.com/evanphx/json-patch" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +44,10 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" @@ -48,13 +61,15 @@ import ( type versionedTracker struct { testing.ObjectTracker - scheme *runtime.Scheme + scheme *runtime.Scheme + withStatusSubresource sets.Set[schema.GroupVersionKind] } type fakeClient struct { - tracker versionedTracker - scheme *runtime.Scheme - restMapper meta.RESTMapper + tracker versionedTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper + withStatusSubresource sets.Set[schema.GroupVersionKind] // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. // The inner map maps from index name to IndexerFunc. @@ -95,12 +110,14 @@ func NewClientBuilder() *ClientBuilder { // ClientBuilder builds a fake client. 
type ClientBuilder struct { - scheme *runtime.Scheme - restMapper meta.RESTMapper - initObject []client.Object - initLists []client.ObjectList - initRuntimeObjects []runtime.Object - objectTracker testing.ObjectTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object + withStatusSubresource []client.Object + objectTracker testing.ObjectTracker + interceptorFuncs *interceptor.Funcs // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. // The inner map maps from index name to IndexerFunc. @@ -185,6 +202,19 @@ func (f *ClientBuilder) WithIndex(obj runtime.Object, field string, extractValue return f } +// WithStatusSubresource configures the passed object with a status subresource, which means +// calls to Update and Patch will not alter its status. +func (f *ClientBuilder) WithStatusSubresource(o ...client.Object) *ClientBuilder { + f.withStatusSubresource = append(f.withStatusSubresource, o...) + return f +} + +// WithInterceptorFuncs configures the client methods to be intercepted using the provided interceptor.Funcs. +func (f *ClientBuilder) WithInterceptorFuncs(interceptorFuncs interceptor.Funcs) *ClientBuilder { + f.interceptorFuncs = &interceptorFuncs + return f +} + // Build builds and returns a new fake client. func (f *ClientBuilder) Build() client.WithWatch { if f.scheme == nil { @@ -196,10 +226,19 @@ func (f *ClientBuilder) Build() client.WithWatch { var tracker versionedTracker + withStatusSubResource := sets.New(inTreeResourcesWithStatus()...) + for _, o := range f.withStatusSubresource { + gvk, err := apiutil.GVKForObject(o, f.scheme) + if err != nil { + panic(fmt.Errorf("failed to get gvk for object %T: %w", withStatusSubResource, err)) + } + withStatusSubResource.Insert(gvk) + } + if f.objectTracker == nil { - tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme} + tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme, withStatusSubresource: withStatusSubResource} } else { - tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme} + tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme, withStatusSubresource: withStatusSubResource} } for _, obj := range f.initObject { @@ -217,12 +256,20 @@ func (f *ClientBuilder) Build() client.WithWatch { panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) } } - return &fakeClient{ - tracker: tracker, - scheme: f.scheme, - restMapper: f.restMapper, - indexes: f.indexes, + + var result client.WithWatch = &fakeClient{ + tracker: tracker, + scheme: f.scheme, + restMapper: f.restMapper, + indexes: f.indexes, + withStatusSubresource: withStatusSubResource, + } + + if f.interceptorFuncs != nil { + result = interceptor.NewClient(result, *f.interceptorFuncs) } + + return result } const trackerAddResourceVersion = "999" @@ -243,6 +290,9 @@ func (t versionedTracker) Add(obj runtime.Object) error { if err != nil { return fmt.Errorf("failed to get accessor for object: %w", err) } + if accessor.GetDeletionTimestamp() != nil && len(accessor.GetFinalizers()) == 0 { + return fmt.Errorf("refusing to create obj %s with metadata.deletionTimestamp but no finalizers", accessor.GetName()) + } if accessor.GetResourceVersion() == "" { // We use a "magic" value of 999 here because this field // is parsed as uint and 
and 0 is already used in Update. @@ -290,20 +340,22 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob return nil } -// convertFromUnstructuredIfNecessary will convert *unstructured.Unstructured for a GVK that is recocnized +// convertFromUnstructuredIfNecessary will convert runtime.Unstructured for a GVK that is recognized // by the schema into the whatever the schema produces with New() for said GVK. // This is required because the tracker unconditionally saves on manipulations, but its List() implementation // tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure // we save as the very same type, otherwise subsequent List requests will fail. func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) { - u, isUnstructured := o.(*unstructured.Unstructured) - if !isUnstructured || !s.Recognizes(u.GroupVersionKind()) { + gvk := o.GetObjectKind().GroupVersionKind() + + u, isUnstructured := o.(runtime.Unstructured) + if !isUnstructured || !s.Recognizes(gvk) { return o, nil } - typed, err := s.New(u.GroupVersionKind()) + typed, err := s.New(gvk) if err != nil { - return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", u.GroupVersionKind().String(), err) + return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", gvk, err) } unstructuredSerialized, err := json.Marshal(u) @@ -318,6 +370,16 @@ func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (ru } func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + isStatus := false + // We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change + // that reaction, we use the callstack to figure out if this originated from the status client. 
+ if bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).Patch")) { + isStatus = true + } + return t.update(gvr, obj, ns, isStatus, false) +} + +func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus bool, deleting bool) error { accessor, err := meta.Accessor(obj) if err != nil { return fmt.Errorf("failed to get accessor for object: %w", err) @@ -348,6 +410,20 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob return err } + if t.withStatusSubresource.Has(gvk) { + if isStatus { // copy everything but status and metadata.ResourceVersion from original object + if err := copyNonStatusFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) + } + } else { // copy status from original object + if err := copyStatusFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to copy the status for object with status subresource: %w", err) + } + } + } else if isStatus { + return apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) + } + oldAccessor, err := meta.Accessor(oldObject) if err != nil { return err @@ -370,6 +446,11 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob } intResourceVersion++ accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + + if !deleting && !deletionTimestampEqual(accessor, oldAccessor) { + return fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) + } + if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) } @@ -436,7 +517,7 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) { + if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(gvk) { // We need to register the ListKind with UnstructuredList: // https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 c.schemeWriteLock.Lock() @@ -563,6 +644,16 @@ func (c *fakeClient) RESTMapper() meta.RESTMapper { return c.restMapper } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *fakeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *fakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.restMapper) +} + func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { createOptions := &client.CreateOptions{} createOptions.ApplyOptions(opts) @@ -589,6 +680,10 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie } accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) } + // Ignore attempts to set deletion timestamp + if !accessor.GetDeletionTimestamp().IsZero() { + accessor.SetDeletionTimestamp(nil) + } return c.tracker.Create(gvr, obj, accessor.GetNamespace()) } @@ -679,6 +774,10 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts .. } func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return c.update(obj, false, opts...) +} + +func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.UpdateOption) error { updateOptions := &client.UpdateOptions{} updateOptions.ApplyOptions(opts) @@ -696,10 +795,14 @@ func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...clie if err != nil { return err } - return c.tracker.Update(gvr, obj, accessor.GetNamespace()) + return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false) } func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return c.patch(obj, patch, opts...) +} + +func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) error { patchOptions := &client.PatchOptions{} patchOptions.ApplyOptions(opts) @@ -722,19 +825,50 @@ func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client. return err } - reaction := testing.ObjectReaction(c.tracker) - handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)) + gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } - if !handled { - panic("tracker could not handle patch method") + + oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(oldObj) + if err != nil { + return err } - gvk, err := apiutil.GVKForObject(obj, c.scheme) + // Apply patch without updating object. + // To remain in accordance with the behavior of k8s api behavior, + // a patch must not allow for changes to the deletionTimestamp of an object. + // The reaction() function applies the patch to the object and calls Update(), + // whereas dryPatch() replicates this behavior but skips the call to Update(). + // This ensures that the patch may be rejected if a deletionTimestamp is modified, prior + // to updating the object. 
+ action := testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data) + o, err := dryPatch(action, c.tracker) + if err != nil { + return err + } + newObj, err := meta.Accessor(o) if err != nil { return err } + + // Validate that deletionTimestamp has not been changed + if !deletionTimestampEqual(newObj, oldAccessor) { + return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable") + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(action) + if err != nil { + return err + } + if !handled { + panic("tracker could not handle patch method") + } ta, err := meta.TypeAccessor(o) if err != nil { return err @@ -752,12 +886,178 @@ func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client. return err } +// Applying a patch results in a deletionTimestamp that is truncated to the nearest second. +// Check that the diff between a new and old deletion timestamp is within a reasonable threshold +// to be considered unchanged. +func deletionTimestampEqual(newObj metav1.Object, obj metav1.Object) bool { + newTime := newObj.GetDeletionTimestamp() + oldTime := obj.GetDeletionTimestamp() + + if newTime == nil || oldTime == nil { + return newTime == oldTime + } + return newTime.Time.Sub(oldTime.Time).Abs() < time.Second +} + +// The behavior of applying the patch is pulled out into dryPatch(), +// which applies the patch and returns an object, but does not Update() the object. +// This function returns a patched runtime object that may then be validated before a call to Update() is executed. +// This results in some code duplication, but was found to be a cleaner alternative than unmarshalling and introspecting the patch data +// and easier than refactoring the k8s client-go method upstream. 
+// Duplicate of upstream: https://github.com/kubernetes/client-go/blob/783d0d33626e59d55d52bfd7696b775851f92107/testing/fixture.go#L146-L194 +func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType, types.ApplyPatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("PatchType is not supported") + } + return obj, nil +} + +func copyNonStatusFrom(old, new runtime.Object) error { + newClientObject, ok := new.(client.Object) + if !ok { + return fmt.Errorf("%T is not a client.Object", new) + } + // The only thing other than status we have to retain + rv := newClientObject.GetResourceVersion() + + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + newMapStringAny, err := toMapStringAny(new) + if err != nil { + return fmt.Errorf("failed to convert new to *unststructured.Unstructured: %w", err) + } + + // delete everything other than status in case it has fields that were not present in + // the old object + for k := range newMapStringAny { + if k != "status" { + delete(newMapStringAny, k) + } + } + // copy everything other than status from the old object + for k := range oldMapStringAny { + if k != "status" { + newMapStringAny[k] = oldMapStringAny[k] + } + } + + newClientObject.SetResourceVersion(rv) + + if err := fromMapStringAny(newMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + return nil +} + +// copyStatusFrom copies the status from old into new +func copyStatusFrom(old, new runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + newMapStringAny, err := toMapStringAny(new) + if err != nil { + return fmt.Errorf("failed to convert new to *unststructured.Unstructured: %w", err) + } + + newMapStringAny["status"] = oldMapStringAny["status"] + + if err := fromMapStringAny(newMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +func toMapStringAny(obj runtime.Object) (map[string]any, error) { + if unstructured, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured { + 
return unstructured.Object, nil + } + + serialized, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + u := map[string]any{} + return u, json.Unmarshal(serialized, &u) +} + +func fromMapStringAny(u map[string]any, target runtime.Object) error { + if targetUnstructured, isUnstructured := target.(*unstructured.Unstructured); isUnstructured { + targetUnstructured.Object = u + return nil + } + + serialized, err := json.Marshal(u) + if err != nil { + return fmt.Errorf("failed to serialize: %w", err) + } + + if err := json.Unmarshal(serialized, &target); err != nil { + return fmt.Errorf("failed to deserialize: %w", err) + } + + return nil +} + func (c *fakeClient) Status() client.SubResourceWriter { return c.SubResource("status") } func (c *fakeClient) SubResource(subResource string) client.SubResourceClient { - return &fakeSubResourceClient{client: c} + return &fakeSubResourceClient{client: c, subResource: subResource} } func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error { @@ -768,7 +1068,9 @@ func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor meta if len(oldAccessor.GetFinalizers()) > 0 { now := metav1.Now() oldAccessor.SetDeletionTimestamp(&now) - return c.tracker.Update(gvr, old, accessor.GetNamespace()) + // Call update directly with mutability parameter set to true to allow + // changes to deletionTimestamp + return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true) } } } @@ -787,7 +1089,8 @@ func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupV } type fakeSubResourceClient struct { - client *fakeClient + client *fakeClient + subResource string } func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { @@ -795,12 +1098,26 @@ func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource clien } func (sw *fakeSubResourceClient) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { - panic("fakeSubResourceWriter does not support create") + switch sw.subResource { + case "eviction": + _, isEviction := subResource.(*policyv1beta1.Eviction) + if !isEviction { + _, isEviction = subResource.(*policyv1.Eviction) + } + if !isEviction { + return apierrors.NewBadRequest(fmt.Sprintf("got invalid type %t, expected Eviction", subResource)) + } + if _, isPod := obj.(*corev1.Pod); !isPod { + return apierrors.NewNotFound(schema.GroupResource{}, "") + } + + return sw.client.Delete(ctx, obj) + default: + return fmt.Errorf("fakeSubResourceWriter does not support create for %s", sw.subResource) + } } func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { - // TODO(droot): This results in full update of the obj (spec + subresources). Need - // a way to update subresource only. 
updateOptions := client.SubResourceUpdateOptions{} updateOptions.ApplyOptions(opts) @@ -808,13 +1125,10 @@ func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, if updateOptions.SubResourceBody != nil { body = updateOptions.SubResourceBody } - return sw.client.Update(ctx, body, &updateOptions.UpdateOptions) + return sw.client.update(body, true, &updateOptions.UpdateOptions) } func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { - // TODO(droot): This results in full update of the obj (spec + subresources). Need - // a way to update subresource only. - patchOptions := client.SubResourcePatchOptions{} patchOptions.ApplyOptions(opts) @@ -823,7 +1137,7 @@ func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, p body = patchOptions.SubResourceBody } - return sw.client.Patch(ctx, body, patch, &patchOptions.PatchOptions) + return sw.client.patch(body, patch, &patchOptions.PatchOptions) } func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { @@ -923,6 +1237,42 @@ func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { return false } +func inTreeResourcesWithStatus() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + {Version: "v1", Kind: "Namespace"}, + {Version: "v1", Kind: "Node"}, + {Version: "v1", Kind: "PersistentVolumeClaim"}, + {Version: "v1", Kind: "PersistentVolume"}, + {Version: "v1", Kind: "Pod"}, + {Version: "v1", Kind: "ReplicationController"}, + {Version: "v1", Kind: "Service"}, + + {Group: "apps", Version: "v1", Kind: "Deployment"}, + {Group: "apps", Version: "v1", Kind: "DaemonSet"}, + {Group: "apps", Version: "v1", Kind: "ReplicaSet"}, + {Group: "apps", Version: "v1", Kind: "StatefulSet"}, + + {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}, + + {Group: "batch", Version: "v1", Kind: "CronJob"}, + {Group: "batch", Version: "v1", Kind: "Job"}, + + {Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"}, + + {Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}, + {Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"}, + + {Group: "policy", Version: "v1", Kind: "PodDisruptionBudget"}, + + {Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"}, + + {Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}, + } +} + // zero zeros the value of a pointer. func zero(x interface{}) { if x == nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go new file mode 100644 index 000000000..3d3f3cb01 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go @@ -0,0 +1,166 @@ +package interceptor + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Funcs contains functions that are called instead of the underlying client's methods. 
+type Funcs struct { + Get func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + List func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error + Create func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error + Delete func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error + DeleteAllOf func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error + Update func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error + Patch func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + Watch func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) + SubResource func(client client.WithWatch, subResource string) client.SubResourceClient + SubResourceGet func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error + SubResourceCreate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error + SubResourceUpdate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error + SubResourcePatch func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error +} + +// NewClient returns a new interceptor client that calls the functions in funcs instead of the underlying client's methods, if they are not nil. +func NewClient(interceptedClient client.WithWatch, funcs Funcs) client.WithWatch { + return interceptor{ + client: interceptedClient, + funcs: funcs, + } +} + +type interceptor struct { + client client.WithWatch + funcs Funcs +} + +var _ client.WithWatch = &interceptor{} + +func (c interceptor) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c interceptor) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c interceptor) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.funcs.Get != nil { + return c.funcs.Get(ctx, c.client, key, obj, opts...) + } + return c.client.Get(ctx, key, obj, opts...) +} + +func (c interceptor) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.funcs.List != nil { + return c.funcs.List(ctx, c.client, list, opts...) + } + return c.client.List(ctx, list, opts...) +} + +func (c interceptor) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.funcs.Create != nil { + return c.funcs.Create(ctx, c.client, obj, opts...) + } + return c.client.Create(ctx, obj, opts...) +} + +func (c interceptor) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.funcs.Delete != nil { + return c.funcs.Delete(ctx, c.client, obj, opts...) + } + return c.client.Delete(ctx, obj, opts...) 
+} + +func (c interceptor) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.funcs.Update != nil { + return c.funcs.Update(ctx, c.client, obj, opts...) + } + return c.client.Update(ctx, obj, opts...) +} + +func (c interceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.funcs.Patch != nil { + return c.funcs.Patch(ctx, c.client, obj, patch, opts...) + } + return c.client.Patch(ctx, obj, patch, opts...) +} + +func (c interceptor) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.funcs.DeleteAllOf != nil { + return c.funcs.DeleteAllOf(ctx, c.client, obj, opts...) + } + return c.client.DeleteAllOf(ctx, obj, opts...) +} + +func (c interceptor) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c interceptor) SubResource(subResource string) client.SubResourceClient { + if c.funcs.SubResource != nil { + return c.funcs.SubResource(c.client, subResource) + } + return subResourceInterceptor{ + subResourceName: subResource, + client: c.client, + funcs: c.funcs, + } +} + +func (c interceptor) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +func (c interceptor) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +func (c interceptor) Watch(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + if c.funcs.Watch != nil { + return c.funcs.Watch(ctx, c.client, obj, opts...) + } + return c.client.Watch(ctx, obj, opts...) +} + +type subResourceInterceptor struct { + subResourceName string + client client.Client + funcs Funcs +} + +var _ client.SubResourceClient = &subResourceInterceptor{} + +func (s subResourceInterceptor) Get(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error { + if s.funcs.SubResourceGet != nil { + return s.funcs.SubResourceGet(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Get(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + if s.funcs.SubResourceCreate != nil { + return s.funcs.SubResourceCreate(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Create(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.funcs.SubResourceUpdate != nil { + return s.funcs.SubResourceUpdate(ctx, s.client, s.subResourceName, obj, opts...) + } + return s.client.SubResource(s.subResourceName).Update(ctx, obj, opts...) +} + +func (s subResourceInterceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.funcs.SubResourcePatch != nil { + return s.funcs.SubResourcePatch(ctx, s.client, s.subResourceName, obj, patch, opts...) + } + return s.client.SubResource(s.subResourceName).Patch(ctx, obj, patch, opts...) 
+} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index b642f7f88..0ddda3163 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -20,6 +20,7 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -169,6 +170,10 @@ type Client interface { Scheme() *runtime.Scheme // RESTMapper returns the rest this client is using. RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) } // WithWatch supports Watch on top of the CRUD operations supported by diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 00bc2175c..222dc7957 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewNamespacedClient wraps an existing client enforcing the namespace value. @@ -52,9 +52,19 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { return n.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -72,7 +82,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat // Update implements client.Client. func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -90,7 +100,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat // Delete implements client.Client. 
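Reviewer note (not part of the diff): the fake-client additions earlier in this diff (WithStatusSubresource, WithInterceptorFuncs) and the new interceptor package shown above are test-facing. A hedged sketch of how a test could combine them; the Deployment type and the injected error are assumptions for illustration, and the Funcs.Get signature matches the one added above.

```go
package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
)

// WithStatusSubresource makes Update/Patch leave .status unchanged (use
// Status().Update or Status().Patch for that), while WithInterceptorFuncs
// lets a test override individual client methods without a custom wrapper.
func main() {
	c := fake.NewClientBuilder().
		WithStatusSubresource(&appsv1.Deployment{}).
		WithInterceptorFuncs(interceptor.Funcs{
			Get: func(ctx context.Context, cl client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
				// Simulate an API failure for every Get in this test.
				return fmt.Errorf("injected get error for %s", key)
			},
		}).
		Build()

	err := c.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "demo"}, &appsv1.Deployment{})
	fmt.Println(err) // injected get error for default/demo
}
```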
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -108,7 +118,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet // DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -121,7 +131,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... // Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -139,7 +149,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o // Get implements client.Client. func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -180,7 +190,7 @@ type namespacedClientSubResourceClient struct { } func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -198,7 +208,7 @@ func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subR } func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -217,7 +227,7 @@ func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, s // Update implements client.SubResourceWriter. func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -235,8 +245,7 @@ func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Ob // Patch implements client.SubResourceWriter. 
func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) - + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 7f6f5b83f..50a461f1c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -606,6 +606,11 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + // Limit specifies the maximum number of results to return from the server. // Limit does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. @@ -788,6 +793,11 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go deleted file mode 100644 index 19d1ab4db..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. -type NewDelegatingClientInput struct { - CacheReader Reader - Client Client - UncachedObjects []Object - CacheUnstructured bool -} - -// NewDelegatingClient creates a new delegating client. -// -// A delegating client forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. 
-func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { - uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range in.UncachedObjects { - gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) - if err != nil { - return nil, err - } - uncachedGVKs[gvk] = struct{}{} - } - - return &delegatingClient{ - scheme: in.Client.Scheme(), - mapper: in.Client.RESTMapper(), - Reader: &delegatingReader{ - CacheReader: in.CacheReader, - ClientReader: in.Client, - scheme: in.Client.Scheme(), - uncachedGVKs: uncachedGVKs, - cacheUnstructured: in.CacheUnstructured, - }, - Writer: in.Client, - StatusClient: in.Client, - SubResourceClientConstructor: in.Client, - }, nil -} - -type delegatingClient struct { - Reader - Writer - StatusClient - SubResourceClientConstructor - - scheme *runtime.Scheme - mapper meta.RESTMapper -} - -// Scheme returns the scheme this client is using. -func (d *delegatingClient) Scheme() *runtime.Scheme { - return d.scheme -} - -// RESTMapper returns the rest mapper this client is using. -func (d *delegatingClient) RESTMapper() meta.RESTMapper { - return d.mapper -} - -// delegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). -type delegatingReader struct { - CacheReader Reader - ClientReader Reader - - uncachedGVKs map[schema.GroupVersionKind]struct{} - scheme *runtime.Scheme - cacheUnstructured bool -} - -func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, d.scheme) - if err != nil { - return false, err - } - // TODO: this is producing unsafe guesses that don't actually work, - // but it matches ~99% of the cases out there. - if meta.IsListType(obj) { - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - } - if _, isUncached := d.uncachedGVKs[gvk]; isUncached { - return true, nil - } - if !d.cacheUnstructured { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - return isUnstructured || isUnstructuredList, nil - } - return false, nil -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - if isUncached, err := d.shouldBypassCache(obj); err != nil { - return err - } else if isUncached { - return d.ClientReader.Get(ctx, key, obj, opts...) - } - return d.CacheReader.Get(ctx, key, obj, opts...) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { - if isUncached, err := d.shouldBypassCache(list); err != nil { - return err - } else if isUncached { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) 
-} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index ade251572..92afd9a9c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -25,16 +25,14 @@ import ( var _ Reader = &typedClient{} var _ Writer = &typedClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type typedClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -53,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti // Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -73,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti // Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -92,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti // DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -111,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet // Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -136,7 +134,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . // Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -151,7 +149,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts . // List implements client.Client. 
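With split.go removed above, the delegating read-from-cache/write-to-server client is no longer built via client.NewDelegatingClient; the equivalent behaviour is configured through client.Options.Cache, as the cluster package later in this diff does. A sketch under that assumption, with the ConfigMap cache bypass purely illustrative:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newCachedClient shows the v0.15 replacement for client.NewDelegatingClient:
// reads are served from the given cache, writes go straight to the API server,
// and selected types (ConfigMaps here, as an example) bypass the cache.
func newCachedClient(cfg *rest.Config, s *runtime.Scheme, m meta.RESTMapper, ca cache.Cache) (client.Client, error) {
	return client.New(cfg, client.Options{
		Scheme: s,
		Mapper: m,
		Cache: &client.CacheOptions{
			Reader:       ca,                                    // reads go to the cache...
			DisableFor:   []client.Object{&corev1.ConfigMap{}},  // ...except for these types
			Unstructured: false,                                 // unstructured reads stay uncached, as before
		},
	})
}
```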
func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -168,7 +166,7 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti } func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -191,7 +189,7 @@ func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Ob } func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -216,7 +214,7 @@ func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subReso // UpdateSubResource used by SubResourceWriter to write status. func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -251,7 +249,7 @@ func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subReso // PatchSubResource used by SubResourceWriter to write subresource. func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 7f25c7be9..b8d4146c9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -21,30 +21,27 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) var _ Reader = &unstructuredClient{} var _ Writer = &unstructuredClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type unstructuredClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -60,20 +57,20 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Update implements client.Client. 
func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -90,17 +87,17 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -119,11 +116,11 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De // DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -142,11 +139,11 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts // Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -171,17 +168,17 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch // Get implements client.Client. func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() getOpts := GetOptions{} getOpts.ApplyOptions(opts) - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -194,22 +191,22 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // List implements client.Client. 
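The unstructured client above now asserts against the runtime.Unstructured interface rather than the concrete *unstructured.Unstructured, so custom implementations of that interface are accepted too, and the GVK is still saved and restored around each call. A short usage sketch; the apps/v1 Deployment GVK and object key are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// *unstructured.Unstructured still works; any other runtime.Unstructured
	// implementation is now accepted by the client as well.
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

	if err := c.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "demo"}, u); err != nil {
		fmt.Println("get failed:", err)
		return
	}
	// The GVK set above is preserved on u after the call.
	fmt.Println(u.GetObjectKind().GroupVersionKind())
}
```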
func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - u, ok := obj.(*unstructured.UnstructuredList) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -226,11 +223,11 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... } func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", subResource) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { + if _, ok := subResourceObj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } @@ -238,7 +235,7 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -257,11 +254,11 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour } func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { + if _, ok := subResourceObj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } @@ -269,7 +266,7 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -289,11 +286,11 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes } func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -324,14 +321,14 @@ func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, } func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -359,6 +356,6 @@ func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, 
Do(ctx). Into(body) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go index 70490664b..181b22a67 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -21,9 +21,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -33,21 +32,16 @@ func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return &watchingClient{client: client, dynamic: dynamicClient}, nil + return &watchingClient{client: client}, nil } type watchingClient struct { *client - dynamic dynamic.Interface } func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { switch l := list.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return w.unstructuredWatch(ctx, l, opts...) case *metav1.PartialObjectMetadataList: return w.metadataWatch(ctx, l, opts...) @@ -81,25 +75,23 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO return resInt.Watch(ctx, *listOpts.AsListOptions()) } -func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { - gvk := obj.GroupVersionKind() - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - r, err := w.client.unstructuredClient.cache.getResource(obj) +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) if err != nil { return nil, err } listOpts := w.listOpts(opts...) - if listOpts.Namespace != "" && r.isNamespaced() { - return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) - } - return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). 
+ Watch(ctx) } func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { - r, err := w.client.typedClient.cache.getResource(obj) + r, err := w.client.typedClient.resources.getResource(obj) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 905296cd3..7d00c3c4b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -19,6 +19,7 @@ package cluster import ( "context" "errors" + "net/http" "time" "github.com/go-logr/logr" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -37,14 +39,15 @@ import ( // Cluster provides various methods to interact with a cluster. type Cluster interface { - // SetFields will set any dependencies on an object for which the object has implemented the inject - // interface - e.g. inject.Client. - // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. - SetFields(interface{}) error + // GetHTTPClient returns an HTTP client that can be used to talk to the apiserver + GetHTTPClient() *http.Client // GetConfig returns an initialized Config GetConfig() *rest.Config + // GetCache returns a cache.Cache + GetCache() cache.Cache + // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme @@ -57,9 +60,6 @@ type Cluster interface { // GetFieldIndexer returns a client.FieldIndexer configured with the client GetFieldIndexer() client.FieldIndexer - // GetCache returns a cache.Cache - GetCache() cache.Cache - // GetEventRecorderFor returns a new EventRecorder for the provided name GetEventRecorderFor(name string) record.EventRecorder @@ -83,7 +83,7 @@ type Options struct { Scheme *runtime.Scheme // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) // Logger is the logger that should be used by this Cluster. // If none is set, it defaults to log.Log global logger. @@ -103,24 +103,54 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). For namespaced resources the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string + // HTTPClient is the http client that will be used to create the default + // Cache and Client. If not set the rest.HTTPClientFor function will be used + // to create the http client. + HTTPClient *http.Client + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + // NewCache is the function that will create the cache to be used // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. 
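The watch.go changes above drop the separate dynamic client: unstructured watches now go through the same REST machinery as typed ones, and the list only needs to implement runtime.Unstructured. A sketch of watching an unstructured list with NewWithWatch; group, version, kind, and namespace are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// No dynamic.Interface is constructed behind the scenes any more.
	wc, err := client.NewWithWatch(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	list := &unstructured.UnstructuredList{}
	list.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DeploymentList"})

	w, err := wc.Watch(context.Background(), list, client.InNamespace("default"))
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for evt := range w.ResultChan() {
		fmt.Println(evt.Type)
	}
}
```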
NewCache cache.NewCacheFunc + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - // NOTE: The default client will not cache Unstructured. - NewClient NewClientFunc + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -137,7 +167,7 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } // Option can be used to manipulate Options. @@ -153,52 +183,105 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { for _, opt := range opts { opt(&options) } - options = setOptionsDefaults(options) + options, err := setOptionsDefaults(options, config) + if err != nil { + options.Logger.Error(err, "Failed to set defaults") + return nil, err + } // Create the mapper provider - mapper, err := options.MapperProvider(config) + mapper, err := options.MapperProvider(config, options.HTTPClient) if err != nil { options.Logger.Error(err, "Failed to get API Group-Resources") return nil, err } // Create the cache for the cached read client and registering informers - cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + cacheOpts := options.Cache + { + if cacheOpts.Scheme == nil { + cacheOpts.Scheme = options.Scheme + } + if cacheOpts.Mapper == nil { + cacheOpts.Mapper = mapper + } + if cacheOpts.HTTPClient == nil { + cacheOpts.HTTPClient = options.HTTPClient + } + if cacheOpts.SyncPeriod == nil { + cacheOpts.SyncPeriod = options.SyncPeriod + } + if len(cacheOpts.Namespaces) == 0 && options.Namespace != "" { + cacheOpts.Namespaces = []string{options.Namespace} + } + } + cache, err := options.NewCache(config, cacheOpts) if err != nil { return nil, err } - clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + // Create the client, and default its options. 
+ clientOpts := options.Client + { + if clientOpts.Scheme == nil { + clientOpts.Scheme = options.Scheme + } + if clientOpts.Mapper == nil { + clientOpts.Mapper = mapper + } + if clientOpts.HTTPClient == nil { + clientOpts.HTTPClient = options.HTTPClient + } + if clientOpts.Cache == nil { + clientOpts.Cache = &client.CacheOptions{ + Unstructured: false, + } + } + if clientOpts.Cache.Reader == nil { + clientOpts.Cache.Reader = cache + } + + // For backward compatibility, the ClientDisableCacheFor option should + // be appended to the DisableFor option in the client. + clientOpts.Cache.DisableFor = append(clientOpts.Cache.DisableFor, options.ClientDisableCacheFor...) - apiReader, err := client.New(config, clientOptions) + if clientOpts.DryRun == nil && options.DryRunClient { + // For backward compatibility, the DryRunClient (if set) option should override + // the DryRun option in the client (if unset). + clientOpts.DryRun = pointer.Bool(true) + } + } + clientWriter, err := options.NewClient(config, clientOpts) if err != nil { return nil, err } - writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + // Create the API Reader, a client with no cache. + clientReader, err := client.New(config, client.Options{ + HTTPClient: options.HTTPClient, + Scheme: options.Scheme, + Mapper: mapper, + }) if err != nil { return nil, err } - if options.DryRunClient { - writeObj = client.NewDryRunClient(writeObj) - } - // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, options.HTTPClient, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } return &cluster{ config: config, + httpClient: options.HTTPClient, scheme: options.Scheme, cache: cache, fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, + client: clientWriter, + apiReader: clientReader, recorderProvider: recorderProvider, mapper: mapper, logger: options.Logger, @@ -206,21 +289,27 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { } // setOptionsDefaults set default values for Options fields. -func setOptionsDefaults(options Options) Options { +func setOptionsDefaults(options Options, config *rest.Config) (Options, error) { + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return options, err + } + } + // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { options.Scheme = scheme.Scheme } if options.MapperProvider == nil { - options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { - return apiutil.NewDynamicRESTMapper(c) - } + options.MapperProvider = apiutil.NewDynamicRESTMapper } // Allow users to define how to create a new client if options.NewClient == nil { - options.NewClient = DefaultNewClient + options.NewClient = client.New } // Allow newCache to be mocked @@ -250,39 +339,5 @@ func setOptionsDefaults(options Options) Options { options.Logger = logf.RuntimeLog.WithName("cluster") } - return options -} - -// NewClientFunc allows a user to define how to create a client. 
-type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) - -// ClientOptions are the optional arguments for tuning the caching client. -type ClientOptions struct { - UncachedObjects []client.Object - CacheUnstructured bool -} - -// DefaultNewClient creates the default caching client, that will never cache Unstructured. -func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { - return ClientBuilderWithOptions(ClientOptions{})(cache, config, options, uncachedObjects...) -} - -// ClientBuilderWithOptions returns a Client constructor that will build a client -// honoring the options argument -func ClientBuilderWithOptions(options ClientOptions) NewClientFunc { - return func(cache cache.Cache, config *rest.Config, clientOpts client.Options, uncachedObjects ...client.Object) (client.Client, error) { - options.UncachedObjects = append(options.UncachedObjects, uncachedObjects...) - - c, err := client.New(config, clientOpts) - if err != nil { - return nil, err - } - - return client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cache, - Client: c, - UncachedObjects: options.UncachedObjects, - CacheUnstructured: options.CacheUnstructured, - }) - } + return options, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go index 125e1d144..274276423 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -18,6 +18,7 @@ package cluster import ( "context" + "net/http" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" @@ -28,22 +29,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type cluster struct { // config is the rest.config used to talk to the apiserver. Required. config *rest.Config - // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults - // to scheme.scheme. - scheme *runtime.Scheme - - cache cache.Cache - - // TODO(directxman12): Provide an escape hatch to get individual indexers - // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). - client client.Client + httpClient *http.Client + scheme *runtime.Scheme + cache cache.Cache + client client.Client // apiReader is the reader that will make requests to the api server and not the cache. 
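The cluster Options rework above is what the deprecation notices point at: Namespace, ClientDisableCacheFor, and DryRunClient move to the nested Cache and Client options, and NewClientFunc/ClientBuilderWithOptions disappear in favour of client.New. A before/after sketch using cluster.New (manager.Options follows the same shape); the namespace and the Secret bypass are placeholders:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
)

func main() {
	// v0.14:
	//   cluster.New(cfg, func(o *cluster.Options) {
	//       o.Namespace = "team-a"
	//       o.ClientDisableCacheFor = []client.Object{&corev1.Secret{}}
	//       o.DryRunClient = true
	//   })
	//
	// v0.15: the same knobs live on the nested Cache and Client options.
	cl, err := cluster.New(ctrl.GetConfigOrDie(), func(o *cluster.Options) {
		o.Cache.Namespaces = []string{"team-a"}
		o.Client.Cache = &client.CacheOptions{
			DisableFor: []client.Object{&corev1.Secret{}},
		}
		o.Client.DryRun = pointer.Bool(true)
	})
	if err != nil {
		panic(err)
	}
	_ = cl // cl.Start(ctx) would run the cache and informers.
}
```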
apiReader client.Reader @@ -64,32 +59,14 @@ type cluster struct { logger logr.Logger } -func (c *cluster) SetFields(i interface{}) error { - if _, err := inject.ConfigInto(c.config, i); err != nil { - return err - } - if _, err := inject.ClientInto(c.client, i); err != nil { - return err - } - if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { - return err - } - if _, err := inject.SchemeInto(c.scheme, i); err != nil { - return err - } - if _, err := inject.CacheInto(c.cache, i); err != nil { - return err - } - if _, err := inject.MapperInto(c.mapper, i); err != nil { - return err - } - return nil -} - func (c *cluster) GetConfig() *rest.Config { return c.config } +func (c *cluster) GetHTTPClient() *http.Client { + return c.httpClient +} + func (c *cluster) GetClient() client.Client { return c.client } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index 8e853d6a0..9c7b875a8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -29,6 +29,8 @@ import ( // ControllerManagerConfiguration defines the functions necessary to parse a config file // and to configure the Options struct for the ctrl.Manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration interface { runtime.Object @@ -38,6 +40,8 @@ type ControllerManagerConfiguration interface { // DeferredFileLoader is used to configure the decoder for loading controller // runtime component config types. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -52,6 +56,8 @@ type DeferredFileLoader struct { // Defaults: // * Path: "./config.yaml" // * Kind: GenericControllerManagerConfiguration +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func File() *DeferredFileLoader { scheme := runtime.NewScheme() utilruntime.Must(v1alpha1.AddToScheme(scheme)) @@ -83,12 +89,6 @@ func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *Deferre return d } -// InjectScheme will configure the scheme to be used for decoding the file. -func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { - d.scheme = scheme - return nil -} - // loadFile is used from the mutex.Once to load the file. func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go new file mode 100644 index 000000000..b37dffaee --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import "time" + +// Controller contains configuration options for a controller. +type Controller struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + GroupKindConcurrency map[string]int + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go index a169ec559..47a5a2f1d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -14,12 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package config contains functionality for interacting with ComponentConfig -// files -// -// # DeferredFileLoader -// -// This uses a deferred file decoding allowing you to chain your configuration -// setup. You can pass this into manager.Options#File and it will load your -// config. +// Package config contains functionality for interacting with +// configuration for controller-runtime components. package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go index 1e3adbafb..8fdf14d39 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -17,4 +17,6 @@ limitations under the License. // Package v1alpha1 provides the ControllerManagerConfiguration used for // configuring ctrl.Manager // +kubebuilder:object:generate=true +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. 
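The new config.Controller type above carries manager-wide controller defaults that previously lived in the deprecated component-config ControllerConfigurationSpec. A sketch of wiring it up, assuming manager.Options exposes it as a Controller field in this release (the values are placeholders); the per-controller defaulting that consumes these values appears in the controller.NewUnmanaged changes below:

```go
package main

import (
	"time"

	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Manager-wide defaults; individual controllers can still override
		// them through controller.Options.
		Controller: config.Controller{
			MaxConcurrentReconciles: 2,
			CacheSyncTimeout:        5 * time.Minute,
			RecoverPanic:            pointer.Bool(true),
		},
	})
	if err != nil {
		panic(err)
	}
	_ = mgr
}
```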
package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 9efdbc066..ca854bcf3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -23,12 +23,18 @@ import ( var ( // GroupVersion is group version used to register these objects. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index f2226278c..52c8ab300 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -25,6 +25,8 @@ import ( ) // ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -60,7 +62,7 @@ type ControllerManagerConfigurationSpec struct { // +optional Controller *ControllerConfigurationSpec `json:"controller,omitempty"` - // Metrics contains thw controller metrics configuration + // Metrics contains the controller metrics configuration // +optional Metrics ControllerMetrics `json:"metrics,omitempty"` @@ -75,6 +77,11 @@ type ControllerManagerConfigurationSpec struct { // ControllerConfigurationSpec defines the global configuration for // controllers registered with the manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. +// +// Deprecated: Controller global configuration can now be set at the manager level, +// using the manager.Options.Controller field. 
type ControllerConfigurationSpec struct { // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation // allowed for that controller. @@ -101,6 +108,8 @@ type ControllerConfigurationSpec struct { } // ControllerMetrics defines the metrics configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -110,6 +119,8 @@ type ControllerMetrics struct { } // ControllerHealth defines the health configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -127,6 +138,8 @@ type ControllerHealth struct { } // ControllerWebhook defines the webhook server for the controller. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerWebhook struct { // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. @@ -149,6 +162,8 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true // ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -157,6 +172,8 @@ type ControllerManagerConfiguration struct { } // Complete returns the configuration for controller-runtime. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index fe7f94fdc..6732b6f70 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -39,6 +39,18 @@ type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. 
+ RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + // Reconciler reconciles an object Reconciler reconcile.Reconciler @@ -50,14 +62,6 @@ type Options struct { // LogConstructor is used to construct a logger used for this controller and passed // to each reconciliation via the context field. LogConstructor func(request *reconcile.Request) logr.Logger - - // CacheSyncTimeout refers to the time limit set to wait for syncing caches. - // Defaults to 2 minutes if not set. - CacheSyncTimeout time.Duration - - // RecoverPanic indicates whether the panic caused by reconcile should be recovered. - // Defaults to the Controller.RecoverPanic setting from the Manager if unset. - RecoverPanic *bool } // Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests @@ -124,26 +128,33 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } if options.MaxConcurrentReconciles <= 0 { - options.MaxConcurrentReconciles = 1 + if mgr.GetControllerOptions().MaxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = mgr.GetControllerOptions().MaxConcurrentReconciles + } else { + options.MaxConcurrentReconciles = 1 + } } if options.CacheSyncTimeout == 0 { - options.CacheSyncTimeout = 2 * time.Minute + if mgr.GetControllerOptions().CacheSyncTimeout != 0 { + options.CacheSyncTimeout = mgr.GetControllerOptions().CacheSyncTimeout + } else { + options.CacheSyncTimeout = 2 * time.Minute + } } if options.RateLimiter == nil { options.RateLimiter = workqueue.DefaultControllerRateLimiter() } - // Inject dependencies into Reconciler - if err := mgr.SetFields(options.Reconciler); err != nil { - return nil, err - } - if options.RecoverPanic == nil { options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic } + if options.NeedLeaderElection == nil { + options.NeedLeaderElection = mgr.GetControllerOptions().NeedLeaderElection + } + // Create controller with dependencies set return &controller.Controller{ Do: options.Reconciler, @@ -152,10 +163,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, CacheSyncTimeout: options.CacheSyncTimeout, - SetFields: mgr.SetFields, Name: name, LogConstructor: options.LogConstructor, RecoverPanic: options.RecoverPanic, + LeaderElected: options.NeedLeaderElection, }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index e6d3a4eaa..c72b2e1eb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" @@ -36,7 +38,7 @@ var _ EventHandler = &EnqueueRequestForObject{} type EnqueueRequestForObject struct{} // Create implements EventHandler. 
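With the NewUnmanaged defaulting above, per-controller options now fall back to the manager-level config.Controller values, and NeedLeaderElection joins the option set while the SetFields injection disappears. A sketch of overriding both for a single controller via the builder; the Deployment type and values are placeholders, and the reconciler is a no-op:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func setup(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: 4,                   // overrides the manager-level default
			NeedLeaderElection:      pointer.Bool(false), // run even on non-leader replicas
		}).
		Complete(reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
			return reconcile.Result{}, nil
		}))
}
```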
-func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -48,7 +50,7 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate } // Update implements EventHandler. -func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { switch { case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -66,7 +68,7 @@ func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.Rate } // Delete implements EventHandler. -func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return @@ -78,7 +80,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate } // Generic implements EventHandler. -func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 17401b1fd..b55fdde6b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -17,16 +17,17 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) // MapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. -type MapFunc func(client.Object) []reconcile.Request +type MapFunc func(context.Context, client.Object) []reconcile.Request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -52,32 +53,32 @@ type enqueueRequestsFromMapFunc struct { } // Create implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Update implements EventHandler. 
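handler.MapFunc now takes a context.Context, as shown above, so mapping functions that call the API server can honour cancellation. A minimal sketch of the new signature with EnqueueRequestsFromMapFunc; the ConfigMap-to-owner convention via an `app` label is invented for the example:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// v0.14: handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { ... })
// v0.15: the mapper also receives the context of the triggering event.
var mapConfigMapToOwner = handler.EnqueueRequestsFromMapFunc(
	func(ctx context.Context, obj client.Object) []reconcile.Request {
		cm, ok := obj.(*corev1.ConfigMap)
		if !ok {
			return nil
		}
		// A real mapper could use ctx here for lookups against the API server.
		return []reconcile.Request{{
			NamespacedName: types.NamespacedName{
				Namespace: cm.Namespace,
				Name:      cm.Labels["app"], // placeholder convention
			},
		}}
	})
```

Such a handler is then registered with a controller's watch the same way as before; only the function signature changes.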
-func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.ObjectOld, reqs) - e.mapAndEnqueue(q, evt.ObjectNew, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs) } // Delete implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Generic implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } -func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { - for _, req := range e.toRequests(object) { +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(ctx, object) { _, ok := reqs[req] if !ok { q.Add(req) @@ -85,13 +86,3 @@ func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter } } } - -// EnqueueRequestsFromMapFunc can inject fields into the mapper. - -// InjectFunc implements inject.Injector. -func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { - if f == nil { - return nil - } - return f(e.toRequests) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 63699893f..02e7d756f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -17,6 +17,7 @@ limitations under the License. package handler import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -25,15 +26,18 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestForOwner{} +var _ EventHandler = &enqueueRequestForOwner{} -var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner") + +// OwnerOption modifies an EnqueueRequestForOwner EventHandler. +type OwnerOption func(e *enqueueRequestForOwner) // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. @@ -42,13 +46,34 @@ var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOw // // - a source.Kind Source with Type of Pod. 
// -// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. -type EnqueueRequestForOwner struct { - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object +// - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler { + e := &enqueueRequestForOwner{ + ownerType: ownerType, + mapper: mapper, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + for _, opt := range opts { + opt(e) + } + return e +} + +// OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true. +func OnlyControllerOwner() OwnerOption { + return func(e *enqueueRequestForOwner) { + e.isController = true + } +} - // IsController if set will only look at the first OwnerReference with Controller: true. - IsController bool +type enqueueRequestForOwner struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // isController if set will only look at the first OwnerReference with Controller: true. + isController bool // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind @@ -58,7 +83,7 @@ type EnqueueRequestForOwner struct { } // Create implements EventHandler. -func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -67,7 +92,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } // Update implements EventHandler. -func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) e.getOwnerReconcileRequest(evt.ObjectNew, reqs) @@ -77,7 +102,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } // Delete implements EventHandler. -func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -86,7 +111,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } // Generic implements EventHandler. -func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -96,17 +121,17 @@ func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.Rat // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. 
Returns false // if the OwnerType could not be parsed using the scheme. -func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(e.OwnerType) + kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { - log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType)) return err } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. if len(kinds) != 1 { - err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType), "kinds", kinds) return err } // Cache the Group and Kind for the OwnerType @@ -116,7 +141,7 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. -func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { +func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user for _, ref := range e.getOwnersReferences(object) { @@ -138,7 +163,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, Name: ref.Name, }} - // if owner is not namespaced then we should set the namespace to the empty + // if owner is not namespaced then we should not set the namespace mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) if err != nil { log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) @@ -153,16 +178,16 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, } } -// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) // - if IsController is false: take all OwnerReferences. 
-func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { +func (e *enqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences - if !e.IsController { + if !e.isController { return object.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference @@ -172,18 +197,3 @@ func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []met // No Controller OwnerReference found return nil } - -var _ inject.Scheme = &EnqueueRequestForOwner{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. -func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForOwner{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. -func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 8652d22d7..2f380f4fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" ) @@ -41,17 +43,17 @@ import ( // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { // Create is called in response to an create event - e.g. Pod Creation. - Create(event.CreateEvent, workqueue.RateLimitingInterface) + Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. - Update(event.UpdateEvent, workqueue.RateLimitingInterface) + Update(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event - e.g. Pod Deleted. - Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + Delete(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. - Generic(event.GenericEvent, workqueue.RateLimitingInterface) + Generic(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } var _ EventHandler = Funcs{} @@ -60,45 +62,45 @@ var _ EventHandler = Funcs{} type Funcs struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. 
- DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } // Create implements EventHandler. -func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { - h.CreateFunc(e, q) + h.CreateFunc(ctx, e, q) } } // Delete implements EventHandler. -func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { - h.DeleteFunc(e, q) + h.DeleteFunc(ctx, e, q) } } // Update implements EventHandler. -func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { - h.UpdateFunc(e, q) + h.UpdateFunc(ctx, e, q) } } // Generic implements EventHandler. -func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { - h.GenericFunc(e, q) + h.GenericFunc(ctx, e, q) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f7734695c..83aba28cb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -33,12 +33,9 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/source" ) -var _ inject.Injector = &Controller{} - // Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. @@ -61,10 +58,6 @@ type Controller struct { // the Queue for processing Queue workqueue.RateLimitingInterface - // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates - // Deprecated: the caller should handle injected fields itself. - SetFields func(i interface{}) error - // mu is used to synchronize Controller setup mu sync.Mutex @@ -93,6 +86,9 @@ type Controller struct { // RecoverPanic indicates whether the panic caused by reconcile should be recovered. RecoverPanic *bool + + // LeaderElected indicates whether the controller is leader elected or always running. + LeaderElected *bool } // watchDescription contains all the information necessary to start a watch. 
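The handler changes above drop the inject-based setup: every EventHandler method now takes a context.Context, and EnqueueRequestForOwner is built through a constructor that receives the scheme and RESTMapper directly, with OnlyControllerOwner() as a functional option. A minimal downstream sketch of adapting to this, assuming a manager mgr and a ReplicaSet owner type (both purely illustrative):

package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
)

// exampleHandlers shows the two handler styles touched by this patch.
func exampleHandlers(mgr ctrl.Manager) (handler.EventHandler, handler.EventHandler) {
	// The owner handler is now a constructor call; scheme and RESTMapper are
	// passed explicitly instead of being injected by the controller.
	ownerHandler := handler.EnqueueRequestForOwner(
		mgr.GetScheme(), mgr.GetRESTMapper(),
		&appsv1.ReplicaSet{}, handler.OnlyControllerOwner(),
	)

	// handler.Funcs callbacks now receive a context.Context as their first argument.
	funcs := handler.Funcs{
		CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) {
			// map e.Object to reconcile.Requests and q.Add(...) them here
		},
	}
	return ownerHandler, funcs
}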
@@ -127,19 +123,6 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc c.mu.Lock() defer c.mu.Unlock() - // Inject Cache into arguments - if err := c.SetFields(src); err != nil { - return err - } - if err := c.SetFields(evthdler); err != nil { - return err - } - for _, pr := range prct { - if err := c.SetFields(pr); err != nil { - return err - } - } - // Controller hasn't started yet, store the watches locally and return. // // These watches are going to be held on the controller struct until the manager or user calls Start(...). @@ -152,6 +135,14 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } +// NeedLeaderElection implements the manager.LeaderElectionRunnable interface. +func (c *Controller) NeedLeaderElection() bool { + if c.LeaderElected == nil { + return true + } + return *c.LeaderElected +} + // Start implements controller.Controller. func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling @@ -323,7 +314,11 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { result, err := c.Reconcile(ctx, req) switch { case err != nil: - c.Queue.AddRateLimited(req) + if errors.Is(err, reconcile.TerminalError(nil)) { + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc() + } else { + c.Queue.AddRateLimited(req) + } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") @@ -351,12 +346,6 @@ func (c *Controller) GetLogger() logr.Logger { return c.LogConstructor(nil) } -// InjectFunc implement SetFields.Injector. -func (c *Controller) InjectFunc(f inject.Func) error { - c.SetFields = f - return nil -} - // updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index baec66927..b74ce062b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -39,6 +39,13 @@ var ( Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) + // TerminalReconcileErrors is a prometheus counter metrics which holds the total + // number of terminal errors from the Reconciler. + TerminalReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_terminal_reconcile_errors_total", + Help: "Total number of terminal reconciliation errors per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration // of reconciliations. 
ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -67,6 +74,7 @@ func init() { metrics.Registry.MustRegister( ReconcileTotal, ReconcileErrors, + TerminalReconcileErrors, ReconcileTime, WorkerCount, ActiveWorkers, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go index 7057f3dbe..0189c0432 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -17,14 +17,9 @@ limitations under the License. package objectutil import ( - "errors" - "fmt" - apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) // FilterWithLabels returns a copy of the items in objs matching labelSel. @@ -45,34 +40,3 @@ func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtim } return outItems, nil } - -// IsAPINamespaced returns true if the object is namespace scoped. -// For unstructured objects the gvk is found from the object itself. -func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return false, err - } - - return IsAPINamespacedWithGVK(gvk, scheme, restmapper) -} - -// IsAPINamespacedWithGVK returns true if the object having the provided -// GVK is namespace scoped. -func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) - if err != nil { - return false, fmt.Errorf("failed to get restmapping: %w", err) - } - - scope := restmapping.Scope.Name() - - if scope == "" { - return false, errors.New("scope cannot be identified, empty scope returned") - } - - if scope != apimeta.RESTScopeNameRoot { - return true, nil - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 9d8b2f074..21f0146ba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -19,6 +19,7 @@ package recorder import ( "context" "fmt" + "net/http" "sync" "github.com/go-logr/logr" @@ -110,8 +111,12 @@ func (p *Provider) getBroadcaster() record.EventBroadcaster { } // NewProvider create a new Provider instance. 
-func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { - corev1Client, err := corev1client.NewForConfig(config) +func NewProvider(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + if httpClient == nil { + panic("httpClient must not be nil") + } + + corev1Client, err := corev1client.NewForConfigAndClient(config, httpClient) if err != nil { return nil, fmt.Errorf("failed to init client: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go similarity index 67% rename from vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go index f0cfe212e..ae8404a1f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package internal import ( + "context" "fmt" "k8s.io/client-go/tools/cache" @@ -31,17 +32,39 @@ import ( var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") -var _ cache.ResourceEventHandler = EventHandler{} +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: handler, + queue: queue, + predicates: predicates, + } +} // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { - EventHandler handler.EventHandler - Queue workqueue.RateLimitingInterface - Predicates []predicate.Predicate + // ctx stores the context that created the event handler + // that is used to propagate cancellation signals to each handler function. + ctx context.Context + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs +// TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27 +func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } } // OnAdd creates CreateEvent and calls Create on EventHandler. -func (e EventHandler) OnAdd(obj interface{}) { +func (e *EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} // Pull Object out of the object @@ -53,18 +76,20 @@ func (e EventHandler) OnAdd(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Create(c) { return } } // Invoke create handler - e.EventHandler.Create(c, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) } // OnUpdate creates UpdateEvent and calls Update on EventHandler. 
-func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} if o, ok := oldObj.(client.Object); ok { @@ -84,18 +109,20 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Update(u) { return } } // Invoke update handler - e.EventHandler.Update(u, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) } // OnDelete creates DeleteEvent and calls Delete on EventHandler. -func (e EventHandler) OnDelete(obj interface{}) { +func (e *EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a @@ -114,6 +141,9 @@ func (e EventHandler) OnDelete(obj interface{}) { return } + // Set DeleteStateUnknown to true + d.DeleteStateUnknown = true + // Set obj to the tombstone obj obj = tombstone.Obj } @@ -127,12 +157,14 @@ func (e EventHandler) OnDelete(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Delete(d) { return } } // Invoke delete handler - e.EventHandler.Delete(d, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go new file mode 100644 index 000000000..b3a822712 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go @@ -0,0 +1,117 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // Cache used to watch APIs + Cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + if ks.Type == nil { + return fmt.Errorf("must create Kind with a non-nil object") + } + if ks.Cache == nil { + return fmt.Errorf("must create Kind with a non-nil cache") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. 
+ if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. + } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) + if err != nil { + ks.started <- err + return + } + if !ks.Cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c82447d91..c27b4305f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. type loggerPromise struct { - logger *DelegatingLogSink + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,7 +33,7 @@ type loggerPromise struct { tags []interface{} } -func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromi } // WithValues provides a new Logger with the tags appended. -func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { +func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -84,12 +84,12 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { } } -// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. 
It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogSink struct { +type delegatingLogSink struct { lock sync.RWMutex logger logr.LogSink promise *loggerPromise @@ -97,7 +97,8 @@ type DelegatingLogSink struct { } // Init implements logr.LogSink. -func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() l.lock.Lock() defer l.lock.Unlock() l.info = info @@ -106,7 +107,8 @@ func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info // logs. -func (l *DelegatingLogSink) Enabled(level int) bool { +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() return l.logger.Enabled(level) @@ -118,7 +120,8 @@ func (l *DelegatingLogSink) Enabled(level int) bool { // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs should alternate string // keys and arbitrary values. -func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Info(level, msg, keysAndValues...) @@ -132,14 +135,16 @@ func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interfa // The msg field should be used to add context to any underlying error, // while the err field should be used to attach the actual error that // triggered this log line, if present. -func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Error(err, msg, keysAndValues...) } // WithName provides a new Logger with the name appended. -func (l *DelegatingLogSink) WithName(name string) logr.LogSink { +func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -151,7 +156,7 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise @@ -159,7 +164,8 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { } // WithValues provides a new Logger with the tags appended. -func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -171,7 +177,7 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -181,16 +187,16 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. 
-func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// newDelegatingLogSink constructs a new DelegatingLogSink which uses // the given logger before its promise is fulfilled. -func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { - l := &DelegatingLogSink{ +func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 082dce3ad..a79151c69 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -35,7 +35,10 @@ package log import ( "context" - "sync" + "fmt" + "os" + "runtime/debug" + "sync/atomic" "time" "github.com/go-logr/logr" @@ -43,35 +46,24 @@ import ( // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - - loggerWasSet = true - dlog.Fulfill(l.GetSink()) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) } -// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries -// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory -// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. -// -// We need to keep the DelegatingLogSink because we have various inits() that get a logger from -// here. They will always get executed before any code that imports controller-runtime -// has a chance to run and hence to set an actual logger. -func init() { - // Init is blocking, so start a new goroutine - go func() { - time.Sleep(30 * time.Second) - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - if !loggerWasSet { - dlog.Fulfill(NullLogSink{}) +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + SetLogger(logr.New(NullLogSink{})) } - }() + } } var ( - loggerWasSetLock sync.Mutex - loggerWasSet bool + logFullfilled atomic.Bool ) // Log is the base logger used by kubebuilder. It delegates @@ -80,8 +72,10 @@ var ( // the first 30 seconds of a binaries lifetime, it will get // set to a NullLogSink. var ( - dlog = NewDelegatingLogSink(NullLogSink{}) - Log = logr.New(dlog) + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) ) // FromContext returns a logger with predefined values from a context.Context. 
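With the delegating log sink made private, the 30-second fallback no longer happens silently: the first log call after that window prints a one-time warning with a stack trace to stderr and swaps in a NullLogSink, so all later output is discarded. A minimal sketch of setting the logger up front, assuming the zap helper from pkg/log/zap (the options shown are illustrative):

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Call SetLogger before the manager (or anything else that logs) starts;
	// without it, logging after ~30 seconds triggers the stderr warning above
	// and subsequent output goes to the NullLogSink.
	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))

	setupLog := ctrl.Log.WithName("setup")
	setupLog.Info("logger configured")
}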
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 5ccff8b78..f298229e5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -18,11 +18,11 @@ package manager import ( "context" - "crypto/tls" "errors" "fmt" "net" "net/http" + "net/http/pprof" "sync" "sync/atomic" "time" @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -41,12 +40,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -107,8 +105,11 @@ type controllerManager struct { // Healthz probe handler healthzHandler *healthz.Handler - // controllerOptions are the global controller options. - controllerOptions v1alpha1.ControllerConfigurationSpec + // pprofListener is used to serve pprof + pprofListener net.Listener + + // controllerConfig are the global controller options. + controllerConfig config.Controller // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. @@ -128,18 +129,7 @@ type controllerManager struct { // election was configured. elected chan struct{} - // port is the port that the webhook server serves at. - port int - // host is the hostname that the webhook server binds to. - host string - // CertDir is the directory that contains the server key and certificate. - // if not set, webhook server would look up the server key and certificate in - // {TempDir}/k8s-webhook-server/serving-certs - certDir string - // tlsOpts is used to allow configuring the TLS config used for the webhook server. - tlsOpts []func(*tls.Config) - - webhookServer *webhook.Server + webhookServer webhook.Server // webhookServerOnce will be called in GetWebhookServer() to optionally initialize // webhookServer if unset, and Add() it to controllerManager. webhookServerOnce sync.Once @@ -191,31 +181,9 @@ func (cm *controllerManager) Add(r Runnable) error { } func (cm *controllerManager) add(r Runnable) error { - // Set dependencies on the object - if err := cm.SetFields(r); err != nil { - return err - } return cm.runnables.Add(r) } -// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. -func (cm *controllerManager) SetFields(i interface{}) error { - if err := cm.cluster.SetFields(i); err != nil { - return err - } - if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { - return err - } - if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { - return err - } - if _, err := inject.LoggerInto(cm.logger, i); err != nil { - return err - } - - return nil -} - // AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. 
func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { cm.Lock() @@ -272,6 +240,10 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) return nil } +func (cm *controllerManager) GetHTTPClient() *http.Client { + return cm.cluster.GetHTTPClient() +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.cluster.GetConfig() } @@ -304,15 +276,10 @@ func (cm *controllerManager) GetAPIReader() client.Reader { return cm.cluster.GetAPIReader() } -func (cm *controllerManager) GetWebhookServer() *webhook.Server { +func (cm *controllerManager) GetWebhookServer() webhook.Server { cm.webhookServerOnce.Do(func() { if cm.webhookServer == nil { - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, - CertDir: cm.certDir, - TLSOpts: cm.tlsOpts, - } + panic("webhook should not be nil") } if err := cm.Add(cm.webhookServer); err != nil { panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) @@ -325,23 +292,29 @@ func (cm *controllerManager) GetLogger() logr.Logger { return cm.logger } -func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { - return cm.controllerOptions +func (cm *controllerManager) GetControllerOptions() config.Controller { + return cm.controllerConfig } -func (cm *controllerManager) serveMetrics() { +func (cm *controllerManager) addMetricsServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics - mux := http.NewServeMux() mux.Handle(defaultMetricsEndpoint, handler) for path, extraHandler := range cm.metricsExtraHandlers { mux.Handle(path, extraHandler) } - server := httpserver.New(mux) - go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) + return cm.add(&server{ + Kind: "metrics", + Log: cm.logger.WithValues("path", defaultMetricsEndpoint), + Server: srv, + Listener: cm.metricsListener, + }) } func (cm *controllerManager) serveHealthProbes() { @@ -362,6 +335,24 @@ func (cm *controllerManager) serveHealthProbes() { go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) } +func (cm *controllerManager) addPprofServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return cm.add(&server{ + Kind: "pprof", + Log: cm.logger, + Server: srv, + Listener: cm.pprofListener, + }) +} + func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { log = log.WithValues("kind", kind, "addr", ln.Addr()) @@ -451,7 +442,9 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused). if cm.metricsListener != nil { - cm.serveMetrics() + if err := cm.addMetricsServer(); err != nil { + return fmt.Errorf("failed to add metrics server: %w", err) + } } // Serve health probes. 
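The metrics and pprof endpoints are now added as plain manager runnables backed by listeners created from Options, and the webhook server is supplied (or defaulted) as a webhook.Server rather than assembled from Port/Host/CertDir inside the manager. A minimal sketch of opting in from a downstream main, with the bind address and port purely illustrative:

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// Serves /debug/pprof/* through the pprof runnable added above; leave
		// empty or "0" to keep it disabled.
		PprofBindAddress: ":8082",

		// Pass a webhook server explicitly; the defaulting shown further down
		// in this diff otherwise builds one from the deprecated Port/Host/CertDir
		// fields via webhook.NewServer.
		WebhookServer: webhook.NewServer(webhook.Options{Port: 9443}),
	})
}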
@@ -459,6 +452,13 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { cm.serveHealthProbes() } + // Add pprof server + if cm.pprofListener != nil { + if err := cm.addPprofServer(); err != nil { + return fmt.Errorf("failed to add pprof server: %w", err) + } + } + // First start any webhook servers, which includes conversion, validation, and defaulting // webhooks that are registered. // @@ -466,22 +466,22 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks // to never start because no cache can be populated. if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start webhooks: %w", err) } } // Start and wait for caches. if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start caches: %w", err) } } // Start the non-leaderelection Runnables after the cache has synced. if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start other runnables: %w", err) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 2facb1c91..7e65ef0c3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" @@ -44,7 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -55,8 +55,7 @@ type Manager interface { cluster.Cluster // Add will set requested dependencies on the component, and cause the component to be - // started when Start is called. Add will inject any dependencies for which the argument - // implements the inject interface - e.g. inject.Client. + // started when Start is called. // Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). Add(Runnable) error @@ -88,13 +87,13 @@ type Manager interface { Start(ctx context.Context) error // GetWebhookServer returns a webhook.Server - GetWebhookServer() *webhook.Server + GetWebhookServer() webhook.Server // GetLogger returns this manager's logger. GetLogger() logr.Logger // GetControllerOptions returns controller global configuration options. - GetControllerOptions() v1alpha1.ControllerConfigurationSpec + GetControllerOptions() config.Controller } // Options are the arguments for creating a new Manager. @@ -102,10 +101,44 @@ type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources. // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better // to pass your own scheme in. 
See the documentation in pkg/scheme for more information. + // + // If set, the Scheme will be used to create the default Client and Cache. Scheme *runtime.Scheme - // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs. + // + // If set, the RESTMapper returned by this function is used to create the RESTMapper + // used by the Client and Cache. + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -132,6 +165,8 @@ type Options struct { // is "done" with an object, and would otherwise not requeue it, i.e., we // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, // instead of `reconcile.Result{}`. + // + // Deprecated: Use Cache.SyncPeriod instead. SyncPeriod *time.Duration // Logger is the logger that should be used by this manager. @@ -217,6 +252,8 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). For namespaced resources, the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string // MetricsBindAddress is the TCP address that the controller should bind to @@ -235,11 +272,22 @@ type Options struct { // Liveness probe endpoint name, defaults to "healthz" LivenessEndpointName string + // PprofBindAddress is the TCP address that the controller should bind to + // for serving pprof. + // It can be set to "" or "0" to disable the pprof serving. + // Since pprof may contain sensitive information, make sure to protect it + // before exposing it to public. + PprofBindAddress string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Port int // Host is the hostname that the webhook server binds to. // It is used to set webhook.Server.Host if WebhookServer is not set. 
+ // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Host string // CertDir is the directory that contains the server key and certificate. @@ -247,26 +295,19 @@ type Options struct { // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate // must be named tls.key and tls.crt, respectively. // It is used to set webhook.Server.CertDir if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. CertDir string // TLSOpts is used to allow configuring the TLS config used for the webhook server. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. TLSOpts []func(*tls.Config) // WebhookServer is an externally configured webhook.Server. By default, // a Manager will create a default server using Port, Host, and CertDir; // if this is set, the Manager will use this server instead. - WebhookServer *webhook.Server - - // Functions to allow for a user to customize values that will be injected. - - // NewCache is the function that will create the cache to be used - // by the manager. If not set this will use the default new cache function. - NewCache cache.NewCacheFunc - - // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - NewClient cluster.NewClientFunc + WebhookServer webhook.Server // BaseContext is the function that provides Context values to Runnables // managed by the Manager. If a BaseContext function isn't provided, Runnables @@ -275,10 +316,14 @@ type Options struct { // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableCacheFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -297,7 +342,7 @@ type Options struct { // Controller contains global configuration options for controllers // registered within this manager. // +optional - Controller v1alpha1.ControllerConfigurationSpec + Controller config.Controller // makeBroadcaster allows deferring the creation of the broadcaster to // avoid leaking goroutines if we never call Start on this manager. 
It also @@ -306,10 +351,11 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) newMetricsListener func(addr string) (net.Listener, error) newHealthProbeListener func(addr string) (net.Listener, error) + newPprofListener func(addr string) (net.Listener, error) } // BaseContextFunc is a function used to provide a base Context to Runnables @@ -353,12 +399,14 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.MapperProvider = options.MapperProvider clusterOptions.Logger = options.Logger clusterOptions.SyncPeriod = options.SyncPeriod - clusterOptions.Namespace = options.Namespace clusterOptions.NewCache = options.NewCache clusterOptions.NewClient = options.NewClient - clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor - clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + clusterOptions.Cache = options.Cache + clusterOptions.Client = options.Client + clusterOptions.Namespace = options.Namespace //nolint:staticcheck + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor //nolint:staticcheck + clusterOptions.DryRunClient = options.DryRunClient //nolint:staticcheck + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err @@ -367,7 +415,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -381,7 +429,7 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderRecorderProvider = recorderProvider } else { leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) - leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -419,6 +467,13 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + // Create pprof listener. This will throw an error if the bind + // address is invalid or already in use. 
+ pprofListener, err := options.newPprofListener(options.PprofBindAddress) + if err != nil { + return nil, fmt.Errorf("failed to new pprof listener: %w", err) + } + errChan := make(chan error) runnables := newRunnables(options.BaseContext, errChan) @@ -431,13 +486,9 @@ func New(config *rest.Config, options Options) (Manager, error) { resourceLock: resourceLock, metricsListener: metricsListener, metricsExtraHandlers: metricsExtraHandlers, - controllerOptions: options.Controller, + controllerConfig: options.Controller, logger: options.Logger, elected: make(chan struct{}), - port: options.Port, - host: options.Host, - certDir: options.CertDir, - tlsOpts: options.TLSOpts, webhookServer: options.WebhookServer, leaderElectionID: options.LeaderElectionID, leaseDuration: *options.LeaseDuration, @@ -446,6 +497,7 @@ func New(config *rest.Config, options Options) (Manager, error) { healthProbeListener: healthProbeListener, readinessEndpointName: options.ReadinessEndpointName, livenessEndpointName: options.LivenessEndpointName, + pprofListener: pprofListener, gracefulShutdownTimeout: *options.GracefulShutdownTimeout, internalProceduresStop: make(chan struct{}), leaderElectionStopped: make(chan struct{}), @@ -456,14 +508,14 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow // cli flags to override anything specified in the config file. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { - if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { - err := inj.InjectScheme(o.Scheme) - if err != nil { - return o, err - } - } - newObj, err := loader.Complete() if err != nil { return o, err @@ -498,18 +550,23 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, if o.Port == 0 && newObj.Webhook.Port != nil { o.Port = *newObj.Webhook.Port } - if o.Host == "" && newObj.Webhook.Host != "" { o.Host = newObj.Webhook.Host } - if o.CertDir == "" && newObj.Webhook.CertDir != "" { o.CertDir = newObj.Webhook.CertDir } + if o.WebhookServer == nil { + o.WebhookServer = webhook.NewServer(webhook.Options{ + Port: o.Port, + Host: o.Host, + CertDir: o.CertDir, + }) + } if newObj.Controller != nil { - if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { - o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout } if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { @@ -521,6 +578,13 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, } // AndFromOrDie will use options.AndFrom() and will panic if there are errors. 
+// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -579,6 +643,19 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } +// defaultPprofListener creates the default pprof listener bound to the given address. +func defaultPprofListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + // defaultBaseContext is used as the BaseContext value in Options if one // has not already been set. func defaultBaseContext() context.Context { @@ -639,6 +716,10 @@ func setOptionsDefaults(options Options) Options { options.newHealthProbeListener = defaultHealthProbeListener } + if options.newPprofListener == nil { + options.newPprofListener = defaultPprofListener + } + if options.GracefulShutdownTimeout == nil { gracefulShutdownTimeout := defaultGracefulShutdownPeriod options.GracefulShutdownTimeout = &gracefulShutdownTimeout @@ -652,5 +733,14 @@ func setOptionsDefaults(options Options) Options { options.BaseContext = defaultBaseContext } + if options.WebhookServer == nil { + options.WebhookServer = webhook.NewServer(webhook.Options{ + Host: options.Host, + Port: options.Port, + CertDir: options.CertDir, + TLSOpts: options.TLSOpts, + }) + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index f7b91a209..549741e6e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -56,7 +56,7 @@ func (r *runnables) Add(fn Runnable) error { return r.Caches.Add(fn, func(ctx context.Context) bool { return runnable.GetCache().WaitForCacheSync(ctx) }) - case *webhook.Server: + case webhook.Server: return r.Webhooks.Add(fn, nil) case LeaderElectionRunnable: if !runnable.NeedLeaderElection() { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go new file mode 100644 index 000000000..b6509f48f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package manager + +import ( + "context" + "errors" + "net" + "net/http" + + "github.com/go-logr/logr" +) + +// server is a general purpose HTTP server Runnable for a manager +// to serve some internal handlers such as health probes, metrics and profiling. +type server struct { + Kind string + Log logr.Logger + Server *http.Server + Listener net.Listener +} + +func (s *server) Start(ctx context.Context) error { + log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr()) + + serverShutdown := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down server") + if err := s.Server.Shutdown(context.Background()); err != nil { + log.Error(err, "error shutting down server") + } + close(serverShutdown) + }() + + log.Info("starting server") + if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + <-serverShutdown + return nil +} + +func (s *server) NeedLeaderElection() bool { + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index a8b43ea0a..ff28998c4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -18,8 +18,6 @@ package metrics import ( "context" - "net/url" - "time" "github.com/prometheus/client_golang/prometheus" clientmetrics "k8s.io/client-go/tools/metrics" @@ -29,70 +27,9 @@ import ( // that client-go registers metrics. We copy the names and formats // from Kubernetes so that we match the core controllers. -// Metrics subsystem and all of the keys used by the rest client. -const ( - RestClientSubsystem = "rest_client" - LatencyKey = "request_latency_seconds" - ResultKey = "requests_total" -) - var ( // client metrics. - // RequestLatency reports the request latency in seconds per verb/URL. - // Deprecated: This metric is deprecated for removal in a future release: using the URL as a - // dimension results in cardinality explosion for some consumers. It was deprecated upstream - // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. - // It is not registered by default. To register: - // import ( - // clientmetrics "k8s.io/client-go/tools/metrics" - // clmetrics "sigs.k8s.io/controller-runtime/metrics" - // ) - // - // func init() { - // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) - // clientmetrics.Register(clientmetrics.RegisterOpts{ - // RequestLatency: clmetrics.LatencyAdapter - // }) - // } - RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: RestClientSubsystem, - Name: LatencyKey, - Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, []string{"verb", "url"}) - - // requestLatency is a Prometheus Histogram metric type partitioned by - // "verb", and "host" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_duration_seconds", - Help: "Request latency in seconds. Broken down by verb, and host.", - Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0}, - }, - []string{"verb", "host"}, - ) - - requestSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_size_bytes", - Help: "Request size in bytes. 
Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - - responseSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_response_size_bytes", - Help: "Response size in bytes. Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - requestResult = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "rest_client_requests_total", @@ -109,17 +46,11 @@ func init() { // registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry - Registry.MustRegister(requestLatency) - Registry.MustRegister(requestSize) - Registry.MustRegister(responseSize) Registry.MustRegister(requestResult) // register the metrics with client-go clientmetrics.Register(clientmetrics.RegisterOpts{ - RequestLatency: &LatencyAdapter{metric: requestLatency}, - RequestSize: &sizeAdapter{metric: requestSize}, - ResponseSize: &sizeAdapter{metric: responseSize}, - RequestResult: &resultAdapter{metric: requestResult}, + RequestResult: &resultAdapter{metric: requestResult}, }) } @@ -131,24 +62,6 @@ func registerClientMetrics() { // copied (more-or-less directly) from k8s.io/kubernetes setup code // (which isn't anywhere in an easily-importable place). -// LatencyAdapter implements LatencyMetric. -type LatencyAdapter struct { - metric *prometheus.HistogramVec -} - -// Observe increments the request latency metric for the given verb/URL. -func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { - l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - -type sizeAdapter struct { - metric *prometheus.HistogramVec -} - -func (s *sizeAdapter) Observe(ctx context.Context, verb string, host string, size float64) { - s.metric.WithLabelValues(verb, host).Observe(size) -} - type resultAdapter struct { metric *prometheus.CounterVec } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 8b0f3634e..314635875 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") @@ -242,15 +241,6 @@ type and struct { predicates []Predicate } -func (a and) InjectFunc(f inject.Func) error { - for _, p := range a.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (a and) Create(e event.CreateEvent) bool { for _, p := range a.predicates { if !p.Create(e) { @@ -296,15 +286,6 @@ type or struct { predicates []Predicate } -func (o or) InjectFunc(f inject.Func) error { - for _, p := range o.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (o or) Create(e event.CreateEvent) bool { for _, p := range o.predicates { if p.Create(e) { @@ -350,10 +331,6 @@ type not struct { predicate Predicate } -func (n not) InjectFunc(f inject.Func) error { - return f(n.predicate) -} - func (n 
not) Create(e event.CreateEvent) bool { return !n.predicate.Create(e) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 8285e2ca9..d51cfc34a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -18,6 +18,7 @@ package reconcile import ( "context" + "errors" "time" "k8s.io/apimachinery/pkg/types" @@ -100,3 +101,26 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } + +// TerminalError is an error that will not be retried but still be logged +// and recorded in metrics. +func TerminalError(wrapped error) error { + return &terminalError{err: wrapped} +} + +type terminalError struct { + err error +} + +func (te *terminalError) Unwrap() error { + return te.err +} + +func (te *terminalError) Error() string { + return "terminal error: " + te.err.Error() +} + +func (te *terminalError) Is(target error) bool { + tp := &terminalError{} + return errors.As(target, &tp) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go deleted file mode 100644 index c8c56ba81..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. -// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. -package inject - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles. -type Cache interface { - InjectCache(cache cache.Cache) error -} - -// CacheInto will set informers on i and return the result if it implements Cache. Returns -// false if i does not implement Cache. -func CacheInto(c cache.Cache, i interface{}) (bool, error) { - if s, ok := i.(Cache); ok { - return true, s.InjectCache(c) - } - return false, nil -} - -// APIReader is used by the Manager to inject the APIReader into necessary types. -type APIReader interface { - InjectAPIReader(client.Reader) error -} - -// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader. 
-func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { - if s, ok := i.(APIReader); ok { - return true, s.InjectAPIReader(reader) - } - return false, nil -} - -// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles. -type Config interface { - InjectConfig(*rest.Config) error -} - -// ConfigInto will set config on i and return the result if it implements Config. Returns -// false if i does not implement Config. -func ConfigInto(config *rest.Config, i interface{}) (bool, error) { - if s, ok := i.(Config); ok { - return true, s.InjectConfig(config) - } - return false, nil -} - -// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles. -type Client interface { - InjectClient(client.Client) error -} - -// ClientInto will set client on i and return the result if it implements Client. Returns -// false if i does not implement Client. -func ClientInto(client client.Client, i interface{}) (bool, error) { - if s, ok := i.(Client); ok { - return true, s.InjectClient(client) - } - return false, nil -} - -// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles. -type Scheme interface { - InjectScheme(scheme *runtime.Scheme) error -} - -// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns -// false if i does not implement Scheme. -func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { - if is, ok := i.(Scheme); ok { - return true, is.InjectScheme(scheme) - } - return false, nil -} - -// Stoppable is used by the ControllerManager to inject stop channel into Sources, -// EventHandlers, Predicates, and Reconciles. -type Stoppable interface { - InjectStopChannel(<-chan struct{}) error -} - -// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. -// Returns false if i does not implement Stoppable. -func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { - if s, ok := i.(Stoppable); ok { - return true, s.InjectStopChannel(stop) - } - return false, nil -} - -// Mapper is used to inject the rest mapper to components that may need it. -type Mapper interface { - InjectMapper(meta.RESTMapper) error -} - -// MapperInto will set the rest mapper on i and return the result if it implements Mapper. -// Returns false if i does not implement Mapper. -func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { - if m, ok := i.(Mapper); ok { - return true, m.InjectMapper(mapper) - } - return false, nil -} - -// Func injects dependencies into i. -type Func func(i interface{}) error - -// Injector is used by the ControllerManager to inject Func into Controllers. -type Injector interface { - InjectFunc(f Func) error -} - -// InjectorInto will set f and return the result on i if it implements Injector. Returns -// false if i does not implement Injector. -func InjectorInto(f Func, i interface{}) (bool, error) { - if ii, ok := i.(Injector); ok { - return true, ii.InjectFunc(f) - } - return false, nil -} - -// Logger is used to inject Loggers into components that need them -// and don't otherwise have opinions. -type Logger interface { - InjectLogger(l logr.Logger) error -} - -// LoggerInto will set the logger on the given object if it implements inject.Logger, -// returning true if a InjectLogger was called, and false otherwise. 
-func LoggerInto(l logr.Logger, i interface{}) (bool, error) { - if injectable, wantsLogger := i.(Logger); wantsLogger { - return true, injectable.InjectLogger(l) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 6b6756392..099c8d68f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -18,28 +18,19 @@ package source import ( "context" - "errors" "fmt" "sync" - "time" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.RuntimeLog.WithName("source") - const ( // defaultBufferSize is the default number of event notifications that can be buffered. defaultBufferSize = 1024 @@ -52,8 +43,7 @@ const ( // // * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). // -// Users may build their own Source implementations. If their implementations implement any of the inject package -// interfaces, the dependencies will be injected by the Controller when Watch is called. +// Users may build their own Source implementations. type Source interface { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. @@ -67,144 +57,9 @@ type SyncingSource interface { WaitForSync(ctx context.Context) error } -// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used -// and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster. -func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { - return &kindWithCache{kind: Kind{Type: object, cache: cache}} -} - -type kindWithCache struct { - kind Kind -} - -func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - return ks.kind.Start(ctx, handler, queue, prct...) -} - -func (ks *kindWithCache) String() string { - return ks.kind.String() -} - -func (ks *kindWithCache) WaitForSync(ctx context.Context) error { - return ks.kind.WaitForSync(ctx) -} - -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type client.Object - - // cache used to watch APIs - cache cache.Cache - - // started may contain an error if one was encountered during startup. If its closed and does not - // contain an error, startup and syncing finished. - started chan error - startCancel func() -} - -var _ SyncingSource = &Kind{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. 
-func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Type should have been specified by the user. - if ks.Type == nil { - return fmt.Errorf("must specify Kind.Type") - } - - // cache should have been injected before Start was called - if ks.cache == nil { - return fmt.Errorf("must call CacheInto on Kind before calling Start") - } - - // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not - // sync that informer (most commonly due to RBAC issues). - ctx, ks.startCancel = context.WithCancel(ctx) - ks.started = make(chan error) - go func() { - var ( - i cache.Informer - lastErr error - ) - - // Tries to get an informer until it returns true, - // an error or the specified context is cancelled or expired. - if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, lastErr = ks.cache.GetInformer(ctx, ks.Type) - if lastErr != nil { - kindMatchErr := &meta.NoKindMatchError{} - switch { - case errors.As(lastErr, &kindMatchErr): - log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - case runtime.IsNotRegisteredError(lastErr): - log.Error(lastErr, "kind must be registered to the Scheme") - default: - log.Error(lastErr, "failed to get informer from cache") - } - return false, nil // Retry. - } - return true, nil - }); err != nil { - if lastErr != nil { - ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) - return - } - ks.started <- err - return - } - - _, err := i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) - if err != nil { - ks.started <- err - return - } - if !ks.cache.WaitForCacheSync(ctx) { - // Would be great to return something more informative here - ks.started <- errors.New("cache did not sync") - } - close(ks.started) - }() - - return nil -} - -func (ks *Kind) String() string { - if ks.Type != nil { - return fmt.Sprintf("kind source: %T", ks.Type) - } - return "kind source: unknown type" -} - -// WaitForSync implements SyncingSource to allow controllers to wait with starting -// workers until the cache is synced. -func (ks *Kind) WaitForSync(ctx context.Context) error { - select { - case err := <-ks.started: - return err - case <-ctx.Done(): - ks.startCancel() - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return errors.New("timed out waiting for cache to be synced") - } -} - -var _ inject.Cache = &Kind{} - -// InjectCache is internal should be called only by the Controller. InjectCache is used to inject -// the Cache dependency initialized by the ControllerManager. -func (ks *Kind) InjectCache(c cache.Cache) error { - if ks.cache == nil { - ks.cache = c - } - return nil +// Kind creates a KindSource with the given cache provider. 
+func Kind(cache cache.Cache, object client.Object) SyncingSource { + return &internal.Kind{Type: object, Cache: cache} } var _ Source = &Channel{} @@ -219,9 +74,6 @@ type Channel struct { // Source is the source channel to fetch GenericEvents Source <-chan event.GenericEvent - // stop is to end ongoing goroutine, and close the channels - stop <-chan struct{} - // dest is the destination channels of the added event handlers dest []chan event.GenericEvent @@ -237,18 +89,6 @@ func (cs *Channel) String() string { return fmt.Sprintf("channel source: %p", cs) } -var _ inject.Stoppable = &Channel{} - -// InjectStopChannel is internal should be called only by the Controller. -// It is used to inject the stop channel initialized by the ControllerManager. -func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { - if cs.stop == nil { - cs.stop = stop - } - - return nil -} - // Start implements Source and should only be called by the Controller. func (cs *Channel) Start( ctx context.Context, @@ -260,11 +100,6 @@ func (cs *Channel) Start( return fmt.Errorf("must specify Channel.Source") } - // stop should have been injected before Start was called - if cs.stop == nil { - return fmt.Errorf("must call InjectStop on Channel before calling Start") - } - // use default value if DestBufferSize not specified if cs.DestBufferSize == 0 { cs.DestBufferSize = defaultBufferSize @@ -292,7 +127,11 @@ func (cs *Channel) Start( } if shouldHandle { - handler.Generic(evt, queue) + func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + handler.Generic(ctx, evt, queue) + }() } } }() @@ -359,7 +198,7 @@ func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, que return fmt.Errorf("must specify Informer.Informer") } - _, err := is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + _, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index c7cb71b75..f14f130f7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -19,7 +19,6 @@ package admission import ( "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/json" @@ -32,8 +31,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme. -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. 
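Note (not part of the patch): the decode.go hunk above changes `admission.NewDecoder` to require a non-nil scheme and to return only `*Decoder` (it now panics instead of returning an error). Below is a minimal sketch of how a hand-rolled admission handler might adapt to that; `podAnnotator` and `newPodAnnotator` are hypothetical names, and a handler wired into a manager would normally pass the manager's scheme rather than client-go's `scheme.Scheme`.

```go
package webhooks

import (
	"context"
	"net/http"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podAnnotator is a hypothetical hand-rolled admission handler.
type podAnnotator struct {
	decoder *admission.Decoder
}

// newPodAnnotator constructs the handler with an explicit decoder.
// NewDecoder no longer returns an error; it panics if the scheme is nil.
func newPodAnnotator() *podAnnotator {
	// client-go's scheme.Scheme is used here for brevity; a handler wired into
	// a manager would normally use the manager's scheme instead.
	return &podAnnotator{decoder: admission.NewDecoder(scheme.Scheme)}
}

// Handle decodes the Pod from the request and admits it unchanged.
func (a *podAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response {
	pod := &corev1.Pod{}
	if err := a.decoder.Decode(req, pod); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	return admission.Allowed("")
}
```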
@@ -62,9 +64,13 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er if len(rawObj.Raw) == 0 { return fmt.Errorf("there is no content to decode") } - if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + if unstructuredInto, isUnstructured := into.(runtime.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + var object map[string]interface{} + if err := json.Unmarshal(rawObj.Raw, &object); err != nil { + return err + } + unstructuredInto.SetUnstructuredContent(object) } deserializer := d.codecs.UniversalDeserializer() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index e4e0778f5..a3b720716 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -33,9 +33,9 @@ type Defaulter interface { } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. -func DefaultingWebhookFor(defaulter Defaulter) *Webhook { +func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ - Handler: &mutatingHandler{defaulter: defaulter}, + Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -44,16 +44,11 @@ type mutatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &mutatingHandler{} - -// InjectDecoder injects the decoder into a mutatingHandler. -func (h *mutatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go index 700798424..5f697e7dc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -34,9 +34,9 @@ type CustomDefaulter interface { } // WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. -func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { +func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter CustomDefaulter) *Webhook { return &Webhook{ - Handler: &defaulterForType{object: obj, defaulter: defaulter}, + Handler: &defaulterForType{object: obj, defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -46,15 +46,11 @@ type defaulterForType struct { decoder *Decoder } -var _ DecoderInjector = &defaulterForType{} - -func (h *defaulterForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. 
func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go index 0b274dd02..8dc0cbec6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -20,9 +20,3 @@ Package admission provides implementation for admission webhook and methods to i See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. */ package admission - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 066cc4225..84ab5e75a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -52,7 +52,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { var reviewResponse Response if r.Body == nil { err = errors.New("request body is empty") - wh.log.Error(err, "bad request") + wh.getLogger(nil).Error(err, "bad request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -60,7 +60,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if body, err = io.ReadAll(r.Body); err != nil { - wh.log.Error(err, "unable to read the body from the incoming request") + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -69,7 +69,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // verify the content type is accurate if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) - wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -88,12 +88,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { - wh.log.Error(err, "unable to decode the request") + wh.getLogger(nil).Error(err, "unable to decode the request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } - wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + wh.getLogger(&req).V(4).Info("received request") reviewResponse = wh.Handle(ctx, req) wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) @@ -124,7 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK // writeAdmissionResponse writes ar to w. 
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { if err := json.NewEncoder(w).Encode(ar); err != nil { - wh.log.Error(err, "unable to encode and write the response") + wh.getLogger(nil).Error(err, "unable to encode and write the response") // Since the `ar v1.AdmissionReview` is a clear and legal object, // it should not have problem to be marshalled into bytes. // The error here is probably caused by the abnormal HTTP connection, @@ -132,15 +132,15 @@ func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { // to avoid endless circular calling. serverError := Errored(http.StatusInternalServerError, err) if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { - wh.log.Error(err, "still unable to encode and write the InternalServerError response") + wh.getLogger(nil).Error(err, "still unable to encode and write the InternalServerError response") } } else { res := ar.Response - if log := wh.log; log.V(1).Enabled() { + if log := wh.getLogger(nil); log.V(4).Enabled() { if res.Result != nil { - log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) } - log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + log.V(4).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index 26900cf2e..2f7820d04 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -25,8 +25,6 @@ import ( jsonpatch "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type multiMutating []Handler @@ -62,31 +60,6 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { } } -// InjectFunc injects the field setter into the handlers. -func (hs multiMutating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiMutating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} - // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -120,28 +93,3 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { func MultiValidatingHandler(handlers ...Handler) Handler { return multiValidating(handlers) } - -// InjectFunc injects the field setter into the handlers. -func (hs multiValidating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. 
It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiValidating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index 24ff1dee3..ec1c88c98 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -26,21 +26,21 @@ import ( // Allowed constructs a response indicating that the given operation // is allowed (without any patches). -func Allowed(reason string) Response { - return ValidationResponse(true, reason) +func Allowed(message string) Response { + return ValidationResponse(true, message) } // Denied constructs a response indicating that the given operation // is not allowed. -func Denied(reason string) Response { - return ValidationResponse(false, reason) +func Denied(message string) Response { + return ValidationResponse(false, message) } // Patched constructs a response indicating that the given operation is // allowed, and that the target object should be modified by the given // JSONPatch operations. -func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { - resp := Allowed(reason) +func Patched(message string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(message) resp.Patches = patches return resp @@ -60,21 +60,24 @@ func Errored(code int32, err error) Response { } // ValidationResponse returns a response for admitting a request. -func ValidationResponse(allowed bool, reason string) Response { +func ValidationResponse(allowed bool, message string) Response { code := http.StatusForbidden + reason := metav1.StatusReasonForbidden if allowed { code = http.StatusOK + reason = "" } resp := Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ - Code: int32(code), + Code: int32(code), + Reason: reason, }, }, } - if len(reason) > 0 { - resp.Result.Reason = metav1.StatusReason(reason) + if len(message) > 0 { + resp.Result.Message = message } return resp } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 4b27e75ed..00bda8a4c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -18,7 +18,8 @@ package admission import ( "context" - goerrors "errors" + "errors" + "fmt" "net/http" v1 "k8s.io/api/admission/v1" @@ -26,18 +27,35 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// Warnings represents warning messages. +type Warnings []string + // Validator defines functions for validating an operation. +// The custom resource kind which implements this interface can validate itself. +// To validate the custom resource with another specific struct, use CustomValidator instead. 
type Validator interface { runtime.Object - ValidateCreate() error - ValidateUpdate(old runtime.Object) error - ValidateDelete() error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate() (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. The oldObj is the object before the update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(old runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete() (warnings Warnings, err error) } // ValidatingWebhookFor creates a new Webhook for validating the provided type. -func ValidatingWebhookFor(validator Validator) *Webhook { +func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ - Handler: &validatingHandler{validator: validator}, + Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, } } @@ -46,42 +64,34 @@ type validatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &validatingHandler{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } - // Get the object in the request obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1.Create { - err := h.decoder.Decode(req, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - err = obj.ValidateCreate() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) + var err error + var warnings []string + + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? 
+ case v1.Create: + if err = h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) } - } - if req.Operation == v1.Update { + warnings, err = obj.ValidateCreate() + case v1.Update: oldObj := obj.DeepCopyObject() - err := h.decoder.DecodeRaw(req.Object, obj) + err = h.decoder.DecodeRaw(req.Object, obj) if err != nil { return Errored(http.StatusBadRequest, err) } @@ -90,33 +100,26 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateUpdate(oldObj) - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } - } - - if req.Operation == v1.Delete { + warnings, err = obj.ValidateUpdate(oldObj) + case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted - err := h.decoder.DecodeRaw(req.OldObject, obj) + err = h.decoder.DecodeRaw(req.OldObject, obj) if err != nil { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateDelete() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } + warnings, err = obj.ValidateDelete() + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } - return Allowed("") + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) + } + return Denied(err.Error()).WithWarnings(warnings...) + } + return Allowed("").WithWarnings(warnings...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index 33252f113..e99fbd8a8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -28,16 +28,29 @@ import ( ) // CustomValidator defines functions for validating an operation. +// The object to be validated is passed into methods as a parameter. type CustomValidator interface { - ValidateCreate(ctx context.Context, obj runtime.Object) error - ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error - ValidateDelete(ctx context.Context, obj runtime.Object) error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) } // WithCustomValidator creates a new Webhook for validating the provided type. 
-func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { +func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator CustomValidator) *Webhook { return &Webhook{ - Handler: &validatorForType{object: obj, validator: validator}, + Handler: &validatorForType{object: obj, validator: validator, decoder: NewDecoder(scheme)}, } } @@ -47,16 +60,11 @@ type validatorForType struct { decoder *Decoder } -var _ DecoderInjector = &validatorForType{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatorForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } @@ -70,13 +78,18 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { obj := h.object.DeepCopyObject() var err error + var warnings []string + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? case v1.Create: if err := h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateCreate(ctx, obj) + warnings, err = h.validator.ValidateCreate(ctx, obj) case v1.Update: oldObj := obj.DeepCopyObject() if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { @@ -86,7 +99,7 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateUpdate(ctx, oldObj, obj) + warnings, err = h.validator.ValidateUpdate(ctx, oldObj, obj) case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted @@ -94,20 +107,20 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateDelete(ctx, obj) + warnings, err = h.validator.ValidateDelete(ctx, obj) default: - return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } // Check the error message first. if err != nil { var apiStatus apierrors.APIStatus if errors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) } - return Denied(err.Error()) + return Denied(err.Error()).WithWarnings(warnings...) } // Return allowed if everything succeeded. - return Allowed("") + return Allowed("").WithWarnings(warnings...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index d10b97ddd..f1767f31b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -21,18 +21,17 @@ import ( "errors" "fmt" "net/http" + "sync" "github.com/go-logr/logr" - jsonpatch "gomodules.xyz/jsonpatch/v2" + "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -131,16 +130,14 @@ type Webhook struct { // headers thus allowing you to read them from within the handler WithContextFunc func(context.Context, *http.Request) context.Context - // decoder is constructed on receiving a scheme and passed down to then handler - decoder *Decoder + // LogConstructor is used to construct a logger for logging messages during webhook calls + // based on the given base logger (which might carry more values like the webhook's path). + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of requests. + LogConstructor func(base logr.Logger, req *Request) logr.Logger - log logr.Logger -} - -// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. -func (wh *Webhook) InjectLogger(l logr.Logger) error { - wh.log = l - return nil + setupLogOnce sync.Once + log logr.Logger } // WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. @@ -166,79 +163,47 @@ func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) }() } + reqLog := wh.getLogger(&req) + ctx = logf.IntoContext(ctx, reqLog) + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - wh.log.Error(err, "unable to encode response") + reqLog.Error(err, "unable to encode response") return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } return resp } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - // TODO(directxman12): we should have a better way to pass this down - - var err error - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - // inject the decoder here too, just in case the order of calling this is not - // scheme first, then inject func - if wh.Handler != nil { - if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { - return err +// getLogger constructs a logger from the injected log and LogConstructor. +func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("admission") } - } - - return nil -} + }) -// GetDecoder returns a decoder to decode the objects embedded in admission requests. -// It may be nil if we haven't received a scheme to use to determine object types yet. 
-func (wh *Webhook) GetDecoder() *Decoder { - return wh.decoder + logConstructor := wh.LogConstructor + if logConstructor == nil { + logConstructor = DefaultLogConstructor + } + return logConstructor(wh.log, req) } -// InjectFunc injects the field setter into the webhook. -func (wh *Webhook) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - - // also inject a decoder, and wrap this so that we get a setFields - // that injects a decoder (hopefully things don't ignore the duplicate - // InjectorInto call). - - var setFields inject.Func - setFields = func(target interface{}) error { - if err := f(target); err != nil { - return err - } - - if _, err := inject.InjectorInto(setFields, target); err != nil { - return err - } - - if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { - return err - } - - return nil +// DefaultLogConstructor adds some commonly interesting fields to the given logger. +func DefaultLogConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) } - - return setFields(wh.Handler) + return base } // StandaloneOptions let you configure a StandaloneWebhook. type StandaloneOptions struct { - // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources - // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better - // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. - Scheme *runtime.Scheme // Logger to be used by the webhook. // If none is set, it defaults to log.Log global logger. Logger logr.Logger @@ -258,19 +223,9 @@ type StandaloneOptions struct { // in your own server/mux. In order to be accessed by a kubernetes cluster, // all webhook servers require TLS. func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { - if opts.Scheme == nil { - opts.Scheme = scheme.Scheme - } - - if err := hook.InjectScheme(opts.Scheme); err != nil { - return nil, err + if opts.Logger.GetSink() != nil { + hook.log = opts.Logger } - - if opts.Logger.GetSink() == nil { - opts.Logger = logf.RuntimeLog.WithName("webhook") - } - hook.log = opts.Logger - if opts.MetricsPath == "" { return hook, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go index 879aae3c9..249a364b3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -39,28 +39,20 @@ var ( log = logf.Log.WithName("conversion-webhook") ) -// Webhook implements a CRD conversion webhook HTTP handler. -type Webhook struct { - scheme *runtime.Scheme - decoder *Decoder +func NewWebhookHandler(scheme *runtime.Scheme) http.Handler { + return &webhook{scheme: scheme, decoder: NewDecoder(scheme)} } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. 
-func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - var err error - wh.scheme = s - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - return nil +// webhook implements a CRD conversion webhook HTTP handler. +type webhook struct { + scheme *runtime.Scheme + decoder *Decoder } // ensure Webhook implements http.Handler -var _ http.Handler = &Webhook{} +var _ http.Handler = &webhook{} -func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { convertReview := &apix.ConversionReview{} err := json.NewDecoder(r.Body).Decode(convertReview) if err != nil { @@ -95,7 +87,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // handles a version conversion request. -func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { +func (wh *webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { if req == nil { return nil, fmt.Errorf("conversion request is nil") } @@ -128,7 +120,7 @@ func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.Conv // convertObject will convert given a src object to dst object. // Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 // without compromising readability, so disabling gocyclo linter -func (wh *Webhook) convertObject(src, dst runtime.Object) error { +func (wh *webhook) convertObject(src, dst runtime.Object) error { srcGVK := src.GetObjectKind().GroupVersionKind() dstGVK := dst.GetObjectKind().GroupVersionKind() @@ -155,7 +147,7 @@ func (wh *Webhook) convertObject(src, dst runtime.Object) error { } } -func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { +func (wh *webhook) convertViaHub(src, dst conversion.Convertible) error { hub, err := wh.getHub(src) if err != nil { return err @@ -179,7 +171,7 @@ func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { } // getHub returns an instance of the Hub for passed-in object's group/kind. -func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { +func (wh *webhook) getHub(obj runtime.Object) (conversion.Hub, error) { gvks, err := objectGVKs(wh.scheme, obj) if err != nil { return nil, err @@ -207,7 +199,7 @@ func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { } // allocateDstObject returns an instance for a given GVK. -func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { +func (wh *webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { gvk := schema.FromAPIVersionAndKind(apiVersion, kind) obj, err := wh.scheme.New(gvk) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go index 6a9e9c236..b6bb8bd93 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -30,8 +30,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object. 
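Note (not part of the patch): the admission changes above drop decoder/scheme injection in favor of explicit construction, and extend `Validator`/`CustomValidator` to return `admission.Warnings` alongside the error. A minimal sketch of a `CustomValidator` against these signatures follows; `CronJobValidator` and its messages are hypothetical.

```go
package webhooks

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// CronJobValidator is a hypothetical CustomValidator implementation.
type CronJobValidator struct{}

var _ admission.CustomValidator = &CronJobValidator{}

// ValidateCreate now returns admission.Warnings in addition to an error.
// Warnings are surfaced to the client but do not reject the request.
func (v *CronJobValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return admission.Warnings{"spec.schedule: using a deprecated format"}, nil
}

// ValidateUpdate receives both the old and the new object.
func (v *CronJobValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

// ValidateDelete returning an error denies the deletion.
func (v *CronJobValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return nil, fmt.Errorf("deletion is not allowed while the job is suspended")
}

// The webhook is now built with an explicit scheme instead of decoder injection,
// for example:
//
//	hook := admission.WithCustomValidator(mgr.GetScheme(), &batchv1.CronJob{}, &CronJobValidator{})
```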
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 99c863264..23d5bf435 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -29,12 +29,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/runtime" - kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -49,7 +46,29 @@ var DefaultPort = 9443 // at the default locations (tls.crt and tls.key). If you do not // want to configure TLS (i.e for testing purposes) run an // admission.StandaloneWebhook in your own server. -type Server struct { +type Server interface { + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the webhook server doesn't need leader election. + NeedLeaderElection() bool + + // Register marks the given webhook as being served at the given path. + // It panics if two hooks are registered on the same path. + Register(path string, hook http.Handler) + + // Start runs the server. + // It will install the webhook related resources depend on the server configuration. + Start(ctx context.Context) error + + // StartedChecker returns an healthz.Checker which is healthy after the + // server has been started. + StartedChecker() healthz.Checker + + // WebhookMux returns the servers WebhookMux + WebhookMux() *http.ServeMux +} + +// Options are all the available options for a webhook.Server +type Options struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. Host string @@ -63,9 +82,13 @@ type Server struct { CertDir string // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. CertName string // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. KeyName string // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. @@ -82,13 +105,21 @@ type Server struct { // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux +} - // webhooks keep track of all registered webhooks for dependency injection, - // and to provide better panic messages on duplicate webhook registration. - webhooks map[string]http.Handler +// NewServer constructs a new Server from the provided options. +func NewServer(o Options) Server { + return &DefaultServer{ + Options: o, + } +} - // setFields allows injecting dependencies from an external source - setFields inject.Func +// DefaultServer is the default implementation used for Server. +type DefaultServer struct { + Options Options + + // webhooks keep track of all registered webhooks + webhooks map[string]http.Handler // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once @@ -99,41 +130,49 @@ type Server struct { // mu protects access to the webhook map & setFields for Start, Register, etc mu sync.Mutex + + webhookMux *http.ServeMux } // setDefaults does defaulting for the Server. 
@@ -99,41 +130,49 @@ type Server struct {
 	// mu protects access to the webhook map & setFields for Start, Register, etc
 	mu sync.Mutex
+
+	webhookMux *http.ServeMux
 }
 
 // setDefaults does defaulting for the Server.
-func (s *Server) setDefaults() {
-	s.webhooks = map[string]http.Handler{}
-	if s.WebhookMux == nil {
-		s.WebhookMux = http.NewServeMux()
+func (o *Options) setDefaults() {
+	if o.WebhookMux == nil {
+		o.WebhookMux = http.NewServeMux()
 	}
 
-	if s.Port <= 0 {
-		s.Port = DefaultPort
+	if o.Port <= 0 {
+		o.Port = DefaultPort
 	}
 
-	if len(s.CertDir) == 0 {
-		s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs")
+	if len(o.CertDir) == 0 {
+		o.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs")
 	}
 
-	if len(s.CertName) == 0 {
-		s.CertName = "tls.crt"
+	if len(o.CertName) == 0 {
+		o.CertName = "tls.crt"
 	}
 
-	if len(s.KeyName) == 0 {
-		s.KeyName = "tls.key"
+	if len(o.KeyName) == 0 {
+		o.KeyName = "tls.key"
 	}
 }
 
+func (s *DefaultServer) setDefaults() {
+	s.webhooks = map[string]http.Handler{}
+	s.Options.setDefaults()
+
+	s.webhookMux = s.Options.WebhookMux
+}
+
 // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates
 // the webhook server doesn't need leader election.
-func (*Server) NeedLeaderElection() bool {
+func (*DefaultServer) NeedLeaderElection() bool {
 	return false
 }
 
 // Register marks the given webhook as being served at the given path.
 // It panics if two hooks are registered on the same path.
-func (s *Server) Register(path string, hook http.Handler) {
+func (s *DefaultServer) Register(path string, hook http.Handler) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -141,51 +180,11 @@ func (s *Server) Register(path string, hook http.Handler) {
 	if _, found := s.webhooks[path]; found {
 		panic(fmt.Errorf("can't register duplicate path: %v", path))
 	}
-	// TODO(directxman12): call setfields if we've already started the server
 	s.webhooks[path] = hook
-	s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook))
+	s.webhookMux.Handle(path, metrics.InstrumentedHook(path, hook))
 
 	regLog := log.WithValues("path", path)
 	regLog.Info("Registering webhook")
-
-	// we've already been "started", inject dependencies here.
-	// Otherwise, InjectFunc will do this for us later.
-	if s.setFields != nil {
-		if err := s.setFields(hook); err != nil {
-			// TODO(directxman12): swallowing this error isn't great, but we'd have to
-			// change the signature to fix that
-			regLog.Error(err, "unable to inject fields into webhook during registration")
-		}
-
-		baseHookLog := log.WithName("webhooks")
-
-		// NB(directxman12): we don't propagate this further by wrapping setFields because it's
-		// unclear if this is how we want to deal with log propagation. In this specific instance,
-		// we want to be able to pass a logger to webhooks because they don't know their own path.
-		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil {
-			regLog.Error(err, "unable to logger into webhook during registration")
-		}
-	}
-}
-
-// StartStandalone runs a webhook server without
-// a controller manager.
-func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error {
-	// Use the Kubernetes client-go scheme if none is specified
-	if scheme == nil {
-		scheme = kscheme.Scheme
-	}
-
-	if err := s.InjectFunc(func(i interface{}) error {
-		if _, err := inject.SchemeInto(scheme, i); err != nil {
-			return err
-		}
-		return nil
-	}); err != nil {
-		return err
-	}
-
-	return s.Start(ctx)
 }
 
 // tlsVersion converts from human-readable TLS version (for example "1.1")
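With the injection plumbing and StartStandalone removed above, handlers no longer receive a scheme, decoder, or logger at registration time, and there is no built-in way to run the server outside a manager other than building it yourself (the package's own doc comment also points at admission.StandaloneWebhook for serving from your own HTTP server). A hedged sketch of a standalone replacement; the helper name, hook path, and certificate directory are illustrative assumptions:

package webhookstandalone

import (
	"context"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// runStandalone roughly replaces the removed Server.StartStandalone: build Options
// yourself and hand the server a fully constructed handler.
func runStandalone(ctx context.Context, hook http.Handler) error {
	srv := webhook.NewServer(webhook.Options{
		CertDir: "/etc/webhook/certs", // tls.crt and tls.key expected here
	})

	// Any dependencies the handler needs (scheme, decoder, logger) must be supplied
	// when it is constructed; nothing is injected during Register anymore.
	srv.Register("/my-hook", hook)

	return srv.Start(ctx)
}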
@@ -210,41 +209,49 @@ func tlsVersion(version string) (uint16, error) {
 // Start runs the server.
 // It will install the webhook related resources depend on the server configuration.
-func (s *Server) Start(ctx context.Context) error {
+func (s *DefaultServer) Start(ctx context.Context) error {
 	s.defaultingOnce.Do(s.setDefaults)
 
 	baseHookLog := log.WithName("webhooks")
 	baseHookLog.Info("Starting webhook server")
 
-	certPath := filepath.Join(s.CertDir, s.CertName)
-	keyPath := filepath.Join(s.CertDir, s.KeyName)
-
-	certWatcher, err := certwatcher.New(certPath, keyPath)
+	tlsMinVersion, err := tlsVersion(s.Options.TLSMinVersion)
 	if err != nil {
 		return err
 	}
 
-	go func() {
-		if err := certWatcher.Start(ctx); err != nil {
-			log.Error(err, "certificate watcher error")
-		}
-	}()
-
-	tlsMinVersion, err := tlsVersion(s.TLSMinVersion)
-	if err != nil {
-		return err
+	cfg := &tls.Config{ //nolint:gosec
+		NextProtos: []string{"h2"},
+		MinVersion: tlsMinVersion,
+	}
+	// fallback TLS config ready, will now mutate if passer wants full control over it
+	for _, op := range s.Options.TLSOpts {
+		op(cfg)
 	}
 
-	cfg := &tls.Config{ //nolint:gosec
-		NextProtos:     []string{"h2"},
-		GetCertificate: certWatcher.GetCertificate,
-		MinVersion:     tlsMinVersion,
+	if cfg.GetCertificate == nil {
+		certPath := filepath.Join(s.Options.CertDir, s.Options.CertName)
+		keyPath := filepath.Join(s.Options.CertDir, s.Options.KeyName)
+
+		// Create the certificate watcher and
+		// set the config's GetCertificate on the TLSConfig
+		certWatcher, err := certwatcher.New(certPath, keyPath)
+		if err != nil {
+			return err
+		}
+		cfg.GetCertificate = certWatcher.GetCertificate
+
+		go func() {
+			if err := certWatcher.Start(ctx); err != nil {
+				log.Error(err, "certificate watcher error")
+			}
+		}()
 	}
 
-	// load CA to verify client certificate
-	if s.ClientCAName != "" {
+	// Load CA to verify client certificate, if configured.
+	if s.Options.ClientCAName != "" {
 		certPool := x509.NewCertPool()
-		clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
+		clientCABytes, err := os.ReadFile(filepath.Join(s.Options.CertDir, s.Options.ClientCAName))
 		if err != nil {
 			return fmt.Errorf("failed to read client CA cert: %w", err)
 		}
@@ -258,27 +265,23 @@ func (s *Server) Start(ctx context.Context) error {
 		cfg.ClientAuth = tls.RequireAndVerifyClientCert
 	}
 
-	// fallback TLS config ready, will now mutate if passer wants full control over it
-	for _, op := range s.TLSOpts {
-		op(cfg)
-	}
-
-	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
+	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), cfg)
	if err != nil {
 		return err
 	}
 
-	log.Info("Serving webhook server", "host", s.Host, "port", s.Port)
+	log.Info("Serving webhook server", "host", s.Options.Host, "port", s.Options.Port)
 
-	srv := httpserver.New(s.WebhookMux)
+	srv := httpserver.New(s.webhookMux)
 
 	idleConnsClosed := make(chan struct{})
 	go func() {
 		<-ctx.Done()
-		log.Info("shutting down webhook server")
+		log.Info("Shutting down webhook server with timeout of 1 minute")
 
-		// TODO: use a context with reasonable timeout
-		if err := srv.Shutdown(context.Background()); err != nil {
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer cancel()
+		if err := srv.Shutdown(ctx); err != nil {
 			// Error from closing listeners, or context timeout
 			log.Error(err, "error shutting down the HTTP server")
 		}
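Because TLSOpts is now applied before the certwatcher fallback above, a caller can supply its own GetCertificate and Start will never construct the file-based watcher, which is also why the new CertName/KeyName comments warn against mixing the two. A sketch; loadServingCert is a hypothetical caller-provided callback, and the TLSOpts element type is inferred from the loop over s.Options.TLSOpts in the diff:

package webhooktls

import (
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newServerWithCustomCert wires a caller-managed certificate source into the server.
func newServerWithCustomCert(loadServingCert func(*tls.ClientHelloInfo) (*tls.Certificate, error)) webhook.Server {
	return webhook.NewServer(webhook.Options{
		TLSOpts: []func(*tls.Config){
			func(cfg *tls.Config) {
				// With GetCertificate set here, Start skips the certwatcher, so the
				// serving cert files under CertDir are never read.
				cfg.GetCertificate = loadServingCert
			},
		},
	})
}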
@@ -298,7 +301,7 @@ func (s *Server) Start(ctx context.Context) error {
 
 // StartedChecker returns an healthz.Checker which is healthy after the
 // server has been started.
-func (s *Server) StartedChecker() healthz.Checker {
+func (s *DefaultServer) StartedChecker() healthz.Checker {
 	config := &tls.Config{
 		InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port.
 	}
@@ -311,7 +314,7 @@ func (s *Server) StartedChecker() healthz.Checker {
 		}
 
 		d := &net.Dialer{Timeout: 10 * time.Second}
-		conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
+		conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), config)
 		if err != nil {
 			return fmt.Errorf("webhook server is not reachable: %w", err)
 		}
@@ -324,23 +327,7 @@ func (s *Server) StartedChecker() healthz.Checker {
 	}
 }
 
-// InjectFunc injects the field setter into the server.
-func (s *Server) InjectFunc(f inject.Func) error {
-	s.setFields = f
-
-	// inject fields here that weren't injected in Register because we didn't have setFields yet.
-	baseHookLog := log.WithName("webhooks")
-	for hookPath, webhook := range s.webhooks {
-		if err := s.setFields(webhook); err != nil {
-			return err
-		}
-
-		// NB(directxman12): we don't propagate this further by wrapping setFields because it's
-		// unclear if this is how we want to deal with log propagation. In this specific instance,
-		// we want to be able to pass a logger to webhooks because they don't know their own path.
-		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil {
-			return err
-		}
-	}
-	return nil
+// WebhookMux returns the servers WebhookMux
+func (s *DefaultServer) WebhookMux() *http.ServeMux {
+	return s.webhookMux
 }
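Finally, the exported WebhookMux field is replaced by the WebhookMux() accessor, and StartedChecker still returns a healthz.Checker that performs a TLS dial against the configured host and port. A short usage sketch, assuming a controller-runtime manager named mgr whose GetWebhookServer follows this change and returns the Server interface; the check name is arbitrary:

package webhookready

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// addWebhookReadiness exposes webhook-server readiness through the manager's readyz endpoint.
func addWebhookReadiness(mgr ctrl.Manager) error {
	// Assumed: GetWebhookServer hands back the webhook.Server interface in v0.15.
	srv := mgr.GetWebhookServer()

	// The checker dials the webhook TLS port, as shown in StartedChecker above.
	return mgr.AddReadyzCheck("webhook-server-ready", srv.StartedChecker())
}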