diff --git a/go.mod b/go.mod index 96cf81ea6..4fc4e286a 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( k8s.io/klog v1.0.0 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/cluster-api v1.8.4 + sigs.k8s.io/cluster-api-provider-aws/v2 v2.6.1 sigs.k8s.io/cluster-api-provider-azure v1.17.1 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 @@ -125,7 +126,7 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect - github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect diff --git a/go.sum b/go.sum index 5f4c09331..58835ec4d 100644 --- a/go.sum +++ b/go.sum @@ -207,6 +207,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -275,8 +277,8 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil 
v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= +github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -479,8 +481,8 @@ github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32Zrusyu github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= github.com/securego/gosec/v2 v2.21.2 h1:deZp5zmYf3TWwU7A7cR2+SolbTpZ3HQiwFqnzQyEl3M= github.com/securego/gosec/v2 v2.21.2/go.mod h1:au33kg78rNseF5PwPnTWhuYBFf534bvJRvOrgZ/bFzU= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go 
v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -808,6 +810,8 @@ mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8 mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= sigs.k8s.io/cluster-api v1.8.4 h1:jBKQH1H/HUdUFk8T6qDzIxZJfWw1F5ZP0ZpYQJDmTHs= sigs.k8s.io/cluster-api v1.8.4/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= +sigs.k8s.io/cluster-api-provider-aws/v2 v2.6.1 h1:vbZUYEB7OfPlfHk6wis+UrvRLTqv5F4Nrjl2WDJ1kiw= +sigs.k8s.io/cluster-api-provider-aws/v2 v2.6.1/go.mod h1:1aq1EZbirRW6NC2gYUFCc7cVFwX9PM/vDvoU+2oGPuw= sigs.k8s.io/cluster-api-provider-azure v1.17.1 h1:f1sTGfv6hAN9WrxeawE4pQ2nRhEKb7AJjH6MhU/wAzg= sigs.k8s.io/cluster-api-provider-azure v1.17.1/go.mod h1:16VtsvIpK8qtNHplG2ZHZ74/JKTzOUQIAWWutjnpvEc= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml index b5ffbe03d..597bc9996 100644 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -1,19 +1,18 @@ sudo: false language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master matrix: allow_failures: - go: master fast_finish: true + include: + - go: 1.10.x + - go: 1.11.x + env: GOFMT=1 + - go: master install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) + - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi - go tool vet . - go test -v -race ./... 
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go index f6a2ec4a5..b41a63d1f 100644 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -416,14 +416,14 @@ func canStaleOnError(respHeaders, reqHeaders http.Header) bool { func getEndToEndHeaders(respHeaders http.Header) []string { // These headers are always hop-by-hop hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, + "Trailers": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, } for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { @@ -433,7 +433,7 @@ func getEndToEndHeaders(respHeaders http.Header) []string { } } endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { + for respHeader := range respHeaders { if _, ok := hopByHopHeaders[respHeader]; !ok { endToEndHeaders = append(endToEndHeaders, respHeader) } diff --git a/vendor/modules.txt b/vendor/modules.txt index e6f573c00..11f690b0a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -571,7 +571,7 @@ github.com/gostaticanalysis/forcetypeassert # github.com/gostaticanalysis/nilerr v0.1.1 ## explicit; go 1.15 github.com/gostaticanalysis/nilerr -# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 +# github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc ## explicit github.com/gregjones/httpcache # github.com/hashicorp/go-version v1.7.0 @@ -2016,6 +2016,10 @@ sigs.k8s.io/cluster-api/util/conditions sigs.k8s.io/cluster-api/util/contract sigs.k8s.io/cluster-api/util/labels/format sigs.k8s.io/cluster-api/util/topology +# 
sigs.k8s.io/cluster-api-provider-aws/v2 v2.6.1 +## explicit; go 1.21 +sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2 +sigs.k8s.io/cluster-api-provider-aws/v2/feature # sigs.k8s.io/cluster-api-provider-azure v1.17.1 ## explicit; go 1.22.0 sigs.k8s.io/cluster-api-provider-azure/api/v1beta1 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/LICENSE b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_types.go new file mode 100644 index 000000000..213ad99c5 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_types.go @@ -0,0 +1,360 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ( + // ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before + // removing it from the apiserver. + ClusterFinalizer = "awscluster.infrastructure.cluster.x-k8s.io" + + // AWSClusterControllerIdentityName is the name of the AWSClusterControllerIdentity singleton. 
+ AWSClusterControllerIdentityName = "default" +) + +// AWSClusterSpec defines the desired state of an EC2-based Kubernetes cluster. +type AWSClusterSpec struct { + // NetworkSpec encapsulates all things related to AWS network. + NetworkSpec NetworkSpec `json:"network,omitempty"` + + // The AWS Region the cluster lives in. + Region string `json:"region,omitempty"` + + // Partition is the AWS security partition being used. Defaults to "aws" + // +optional + Partition string `json:"partition,omitempty"` + + // SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) + // +optional + SSHKeyName *string `json:"sshKeyName,omitempty"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + + // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the + // ones added by default. + // +optional + AdditionalTags Tags `json:"additionalTags,omitempty"` + + // ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior. + // +optional + ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"` + + // SecondaryControlPlaneLoadBalancer is an additional load balancer that can be used for the control plane. + // + // An example use case is to have a separate internal load balancer for internal traffic, + // and a separate external load balancer for external traffic. + // + // +optional + SecondaryControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"secondaryControlPlaneLoadBalancer,omitempty"` + + // ImageLookupFormat is the AMI naming format to look up machine images when + // a machine does not specify an AMI. 
When set, this will be used for all + // cluster machines unless a machine specifies a different ImageLookupOrg. + // Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base + // OS and kubernetes version, respectively. The BaseOS will be the value in + // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + // defined by the packages produced by kubernetes/release without v as a + // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + // also: https://golang.org/pkg/text/template/ + // +optional + ImageLookupFormat string `json:"imageLookupFormat,omitempty"` + + // ImageLookupOrg is the AWS Organization ID to look up machine images when a + // machine does not specify an AMI. When set, this will be used for all + // cluster machines unless a machine specifies a different ImageLookupOrg. + // +optional + ImageLookupOrg string `json:"imageLookupOrg,omitempty"` + + // ImageLookupBaseOS is the name of the base operating system used to look + // up machine images when a machine does not specify an AMI. When set, this + // will be used for all cluster machines unless a machine specifies a + // different ImageLookupBaseOS. + ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"` + + // Bastion contains options to configure the bastion host. + // +optional + Bastion Bastion `json:"bastion"` + + // +optional + + // IdentityRef is a reference to an identity to be used when reconciling the managed control plane. + // If no identity is specified, the default identity for this controller will be used. 
+ IdentityRef *AWSIdentityReference `json:"identityRef,omitempty"` + + // S3Bucket contains options to configure a supporting S3 bucket for this + // cluster - currently used for nodes requiring Ignition + // (https://coreos.github.io/ignition/) for bootstrapping (requires + // BootstrapFormatIgnition feature flag to be enabled). + // +optional + S3Bucket *S3Bucket `json:"s3Bucket,omitempty"` +} + +// AWSIdentityKind defines allowed AWS identity types. +type AWSIdentityKind string + +var ( + // ControllerIdentityKind defines identity reference kind as AWSClusterControllerIdentity. + ControllerIdentityKind = AWSIdentityKind("AWSClusterControllerIdentity") + + // ClusterRoleIdentityKind defines identity reference kind as AWSClusterRoleIdentity. + ClusterRoleIdentityKind = AWSIdentityKind("AWSClusterRoleIdentity") + + // ClusterStaticIdentityKind defines identity reference kind as AWSClusterStaticIdentity. + ClusterStaticIdentityKind = AWSIdentityKind("AWSClusterStaticIdentity") +) + +// AWSIdentityReference specifies a identity. +type AWSIdentityReference struct { + // Name of the identity. + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + + // Kind of the identity. + // +kubebuilder:validation:Enum=AWSClusterControllerIdentity;AWSClusterRoleIdentity;AWSClusterStaticIdentity + Kind AWSIdentityKind `json:"kind"` +} + +// Bastion defines a bastion host. +type Bastion struct { + // Enabled allows this provider to create a bastion host instance + // with a public ip to access the VPC private network. + // +optional + Enabled bool `json:"enabled"` + + // DisableIngressRules will ensure there are no Ingress rules in the bastion host's security group. + // Requires AllowedCIDRBlocks to be empty. + // +optional + DisableIngressRules bool `json:"disableIngressRules,omitempty"` + + // AllowedCIDRBlocks is a list of CIDR blocks allowed to access the bastion host. 
+ // They are set as ingress rules for the Bastion host's Security Group (defaults to 0.0.0.0/0). + // +optional + AllowedCIDRBlocks []string `json:"allowedCIDRBlocks,omitempty"` + + // InstanceType will use the specified instance type for the bastion. If not specified, + // Cluster API Provider AWS will use t3.micro for all regions except us-east-1, where t2.micro + // will be the default. + InstanceType string `json:"instanceType,omitempty"` + + // AMI will use the specified AMI to boot the bastion. If not specified, + // the AMI will default to one picked out in public space. + // +optional + AMI string `json:"ami,omitempty"` +} + +// LoadBalancerType defines the type of load balancer to use. +type LoadBalancerType string + +var ( + // LoadBalancerTypeClassic is the classic ELB type. + LoadBalancerTypeClassic = LoadBalancerType("classic") + // LoadBalancerTypeELB is the ELB type. + LoadBalancerTypeELB = LoadBalancerType("elb") + // LoadBalancerTypeALB is the ALB type. + LoadBalancerTypeALB = LoadBalancerType("alb") + // LoadBalancerTypeNLB is the NLB type. + LoadBalancerTypeNLB = LoadBalancerType("nlb") + // LoadBalancerTypeDisabled disables the load balancer. + LoadBalancerTypeDisabled = LoadBalancerType("disabled") +) + +// AWSLoadBalancerSpec defines the desired state of an AWS load balancer. +type AWSLoadBalancerSpec struct { + // Name sets the name of the classic ELB load balancer. As per AWS, the name must be unique + // within your set of load balancers for the region, must have a maximum of 32 characters, must + // contain only alphanumeric characters or hyphens, and cannot begin or end with a hyphen. Once + // set, the value cannot be changed. 
+ // +kubebuilder:validation:MaxLength:=32 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9]([A-Za-z0-9]{0,31}|[-A-Za-z0-9]{0,30}[A-Za-z0-9])$` + // +optional + Name *string `json:"name,omitempty"` + + // Scheme sets the scheme of the load balancer (defaults to internet-facing) + // +kubebuilder:default=internet-facing + // +kubebuilder:validation:Enum=internet-facing;internal + // +optional + Scheme *ELBScheme `json:"scheme,omitempty"` + + // CrossZoneLoadBalancing enables the classic ELB cross availability zone balancing. + // + // With cross-zone load balancing, each load balancer node for your Classic Load Balancer + // distributes requests evenly across the registered instances in all enabled Availability Zones. + // If cross-zone load balancing is disabled, each load balancer node distributes requests evenly across + // the registered instances in its Availability Zone only. + // + // Defaults to false. + // +optional + CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing"` + + // Subnets sets the subnets that should be applied to the control plane load balancer (defaults to discovered subnets for managed VPCs or an empty set for unmanaged VPCs) + // +optional + Subnets []string `json:"subnets,omitempty"` + + // HealthCheckProtocol sets the protocol type for ELB health check target + // default value is ELBProtocolSSL + // +kubebuilder:validation:Enum=TCP;SSL;HTTP;HTTPS;TLS;UDP + // +optional + HealthCheckProtocol *ELBProtocol `json:"healthCheckProtocol,omitempty"` + + // HealthCheck sets custom health check configuration to the API target group. + // +optional + HealthCheck *TargetGroupHealthCheckAPISpec `json:"healthCheck,omitempty"` + + // AdditionalSecurityGroups sets the security groups used by the load balancer. 
Expected to be security group IDs + // This is optional - if not provided new security groups will be created for the load balancer + // +optional + AdditionalSecurityGroups []string `json:"additionalSecurityGroups,omitempty"` + + // AdditionalListeners sets the additional listeners for the control plane load balancer. + // This is only applicable to Network Load Balancer (NLB) types for the time being. + // +listType=map + // +listMapKey=port + // +optional + AdditionalListeners []AdditionalListenerSpec `json:"additionalListeners,omitempty"` + + // IngressRules sets the ingress rules for the control plane load balancer. + // +optional + IngressRules []IngressRule `json:"ingressRules,omitempty"` + + // LoadBalancerType sets the type for a load balancer. The default type is classic. + // +kubebuilder:default=classic + // +kubebuilder:validation:Enum:=classic;elb;alb;nlb;disabled + LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"` + + // DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts + // file of each instance. This is by default, false. + DisableHostsRewrite bool `json:"disableHostsRewrite,omitempty"` + + // PreserveClientIP lets the user control if preservation of client ips must be retained or not. + // If this is enabled 6443 will be opened to 0.0.0.0/0. + PreserveClientIP bool `json:"preserveClientIP,omitempty"` +} + +// AdditionalListenerSpec defines the desired state of an +// additional listener on an AWS load balancer. +type AdditionalListenerSpec struct { + // Port sets the port for the additional listener. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int64 `json:"port"` + + // Protocol sets the protocol for the additional listener. + // Currently only TCP is supported. 
+ // +kubebuilder:validation:Enum=TCP + // +kubebuilder:default=TCP + Protocol ELBProtocol `json:"protocol,omitempty"` + + // HealthCheck sets the optional custom health check configuration to the API target group. + // +optional + HealthCheck *TargetGroupHealthCheckAdditionalSpec `json:"healthCheck,omitempty"` +} + +// AWSClusterStatus defines the observed state of AWSCluster. +type AWSClusterStatus struct { + // +kubebuilder:default=false + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. +type S3Bucket struct { + // ControlPlaneIAMInstanceProfile is a name of the IAMInstanceProfile, which will be allowed + // to read control-plane node bootstrap data from S3 Bucket. + // +optional + ControlPlaneIAMInstanceProfile string `json:"controlPlaneIAMInstanceProfile,omitempty"` + + // NodesIAMInstanceProfiles is a list of IAM instance profiles, which will be allowed to read + // worker nodes bootstrap data from S3 Bucket. + // +optional + NodesIAMInstanceProfiles []string `json:"nodesIAMInstanceProfiles,omitempty"` + + // PresignedURLDuration defines the duration for which presigned URLs are valid. + // + // This is used to generate presigned URLs for S3 Bucket objects, which are used by + // control-plane and worker nodes to fetch bootstrap data. + // + // When enabled, the IAM instance profiles specified are not used. + // +optional + PresignedURLDuration *metav1.Duration `json:"presignedURLDuration,omitempty"` + + // Name defines name of S3 Bucket to be created. 
+ // +kubebuilder:validation:MinLength:=3 + // +kubebuilder:validation:MaxLength:=63 + // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$` + Name string `json:"name"` + + // BestEffortDeleteObjects defines whether access/permission errors during object deletion should be ignored. + // +optional + BestEffortDeleteObjects *bool `json:"bestEffortDeleteObjects,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api,shortName=awsc +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSCluster belongs" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready for EC2 instances" +// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.network.vpc.id",description="AWS VPC the cluster is using" +// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint",priority=1 +// +kubebuilder:printcolumn:name="Bastion IP",type="string",JSONPath=".status.bastion.publicIp",description="Bastion IP address for breakglass access" +// +k8s:defaulter-gen=true + +// AWSCluster is the schema for Amazon EC2 based Kubernetes Cluster API. +type AWSCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AWSClusterSpec `json:"spec,omitempty"` + Status AWSClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AWSClusterList contains a list of AWSCluster. 
+// +k8s:defaulter-gen=true +type AWSClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSCluster `json:"items"` +} + +// GetConditions returns the observations of the operational state of the AWSCluster resource. +func (r *AWSCluster) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. +func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +func init() { + SchemeBuilder.Register(&AWSCluster{}, &AWSClusterList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_webhook.go new file mode 100644 index 000000000..5dbfd7a59 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awscluster_webhook.go @@ -0,0 +1,381 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "fmt" + "strings" + + "github.com/google/go-cmp/cmp" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/annotations" +) + +// log is for logging in this package. +var _ = ctrl.Log.WithName("awscluster-resource") + +func (r *AWSCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta2,name=validation.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awscluster,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,versions=v1beta2,name=default.awscluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var ( + _ webhook.Validator = &AWSCluster{} + _ webhook.Defaulter = &AWSCluster{} +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSCluster) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.Spec.Bastion.Validate()...) + allErrs = append(allErrs, r.validateSSHKeyName()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...) + allErrs = append(allErrs, r.validateNetwork()...) 
+ allErrs = append(allErrs, r.validateControlPlaneLBs()...) + + return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSCluster) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.validateGCTasksAnnotation()...) + + oldC, ok := old.(*AWSCluster) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSCluster but got a %T", old)) + } + + if r.Spec.Region != oldC.Spec.Region { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "region"), r.Spec.Region, "field is immutable"), + ) + } + + // Validate the control plane load balancers. + lbs := map[*AWSLoadBalancerSpec]*AWSLoadBalancerSpec{ + oldC.Spec.ControlPlaneLoadBalancer: r.Spec.ControlPlaneLoadBalancer, + oldC.Spec.SecondaryControlPlaneLoadBalancer: r.Spec.SecondaryControlPlaneLoadBalancer, + } + + for oldLB, newLB := range lbs { + if oldLB == nil && newLB == nil { + continue + } + + allErrs = append(allErrs, r.validateControlPlaneLoadBalancerUpdate(oldLB, newLB)...) + } + + if !cmp.Equal(oldC.Spec.ControlPlaneEndpoint, clusterv1.APIEndpoint{}) && + !cmp.Equal(r.Spec.ControlPlaneEndpoint, oldC.Spec.ControlPlaneEndpoint) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneEndpoint"), r.Spec.ControlPlaneEndpoint, "field is immutable"), + ) + } + + // Modifying VPC id is not allowed because it will cause a new VPC creation if set to nil. 
+ if !cmp.Equal(oldC.Spec.NetworkSpec, NetworkSpec{}) && + !cmp.Equal(oldC.Spec.NetworkSpec.VPC, VPCSpec{}) && + oldC.Spec.NetworkSpec.VPC.ID != "" { + if cmp.Equal(r.Spec.NetworkSpec, NetworkSpec{}) || + cmp.Equal(r.Spec.NetworkSpec.VPC, VPCSpec{}) || + oldC.Spec.NetworkSpec.VPC.ID != r.Spec.NetworkSpec.VPC.ID { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "network", "vpc", "id"), + r.Spec.NetworkSpec.VPC.ID, "field cannot be modified once set")) + } + } + + // If a identityRef is already set, do not allow removal of it. + if oldC.Spec.IdentityRef != nil && r.Spec.IdentityRef == nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "identityRef"), + r.Spec.IdentityRef, "field cannot be set to nil"), + ) + } + + if annotations.IsExternallyManaged(oldC) && !annotations.IsExternallyManaged(r) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata", "annotations"), + r.Annotations, "removal of externally managed annotation is not allowed"), + ) + } + + allErrs = append(allErrs, r.Spec.Bastion.Validate()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...) 
+ + return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) +} + +func (r *AWSCluster) validateControlPlaneLoadBalancerUpdate(oldlb, newlb *AWSLoadBalancerSpec) field.ErrorList { + var allErrs field.ErrorList + + if oldlb == nil { + // If old scheme was nil, the only value accepted here is the default value: internet-facing + if newlb.Scheme != nil && newlb.Scheme.String() != ELBSchemeInternetFacing.String() { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"), + newlb.Scheme, "field is immutable, default value was set to internet-facing"), + ) + } + } else { + // A disabled Load Balancer has many implications that must be treated as immutable/ + // this is mostly used by externally managed Control Plane, and there's no need to support type changes. + // More info: https://kubernetes.slack.com/archives/CD6U2V71N/p1708983246100859?thread_ts=1708973478.410979&cid=CD6U2V71N + if (oldlb.LoadBalancerType == LoadBalancerTypeDisabled && newlb.LoadBalancerType != LoadBalancerTypeDisabled) || + (newlb.LoadBalancerType == LoadBalancerTypeDisabled && oldlb.LoadBalancerType != LoadBalancerTypeDisabled) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "type"), + newlb.Scheme, "field is immutable when created of disabled type"), + ) + } + // If old scheme was not nil, the new scheme should be the same. + if !cmp.Equal(oldlb.Scheme, newlb.Scheme) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "scheme"), + newlb.Scheme, "field is immutable"), + ) + } + // The name must be defined when the AWSCluster is created. If it is not defined, + // then the controller generates a default name at runtime, but does not store it, + // so the name remains nil. In either case, the name cannot be changed. 
+ if !cmp.Equal(oldlb.Name, newlb.Name) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"), + newlb.Name, "field is immutable"), + ) + } + } + + // Block the update for Protocol : + // - if it was not set in old spec but added in new spec + // - if it was set in old spec but changed in new spec + if !cmp.Equal(newlb.HealthCheckProtocol, oldlb.HealthCheckProtocol) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"), + newlb.HealthCheckProtocol, "field is immutable once set"), + ) + } + + return allErrs +} + +// Default satisfies the defaulting webhook interface. +func (r *AWSCluster) Default() { + SetObjectDefaults_AWSCluster(r) +} + +func (r *AWSCluster) validateGCTasksAnnotation() field.ErrorList { + var allErrs field.ErrorList + + annotations := r.GetAnnotations() + if annotations == nil { + return nil + } + + if gcTasksAnnotationValue := annotations[ExternalResourceGCTasksAnnotation]; gcTasksAnnotationValue != "" { + gcTasks := strings.Split(gcTasksAnnotationValue, ",") + + supportedGCTasks := []GCTask{GCTaskLoadBalancer, GCTaskTargetGroup, GCTaskSecurityGroup} + + for _, gcTask := range gcTasks { + found := false + + for _, supportedGCTask := range supportedGCTasks { + if gcTask == string(supportedGCTask) { + found = true + break + } + } + + if !found { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata", "annotations"), + r.Annotations, + fmt.Sprintf("annotation %s contains unsupported GC task %s", ExternalResourceGCTasksAnnotation, gcTask)), + ) + } + } + } + + return allErrs +} + +func (r *AWSCluster) validateSSHKeyName() field.ErrorList { + return validateSSHKeyName(r.Spec.SSHKeyName) +} + +func (r *AWSCluster) validateNetwork() field.ErrorList { + var allErrs field.ErrorList + if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() { + allErrs = append(allErrs, field.Invalid(field.NewPath("ipv6"), r.Spec.NetworkSpec.VPC.IPv6, "IPv6 
cannot be used with unmanaged clusters at this time.")) + } + for _, subnet := range r.Spec.NetworkSpec.Subnets { + if subnet.IsIPv6 || subnet.IPv6CidrBlock != "" { + allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "IPv6 cannot be used with unmanaged clusters at this time.")) + } + if subnet.ZoneType != nil && subnet.IsEdge() { + if subnet.ParentZoneName == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("subnets"), r.Spec.NetworkSpec.Subnets, "ParentZoneName must be set when ZoneType is 'local-zone'.")) + } + } + } + + if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPAMPool != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("cidrBlock"), r.Spec.NetworkSpec.VPC.CidrBlock, "cidrBlock and ipamPool cannot be used together")) + } + + if r.Spec.NetworkSpec.VPC.IPAMPool != nil && r.Spec.NetworkSpec.VPC.IPAMPool.ID == "" && r.Spec.NetworkSpec.VPC.IPAMPool.Name == "" { + allErrs = append(allErrs, field.Invalid(field.NewPath("ipamPool"), r.Spec.NetworkSpec.VPC.IPAMPool, "ipamPool must have either id or name")) + } + + for _, rule := range r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules { + allErrs = append(allErrs, r.validateIngressRule(rule)...) 
+ } + + if r.Spec.NetworkSpec.VPC.ElasticIPPool != nil { + eipp := r.Spec.NetworkSpec.VPC.ElasticIPPool + if eipp.PublicIpv4Pool != nil { + if eipp.PublicIpv4PoolFallBackOrder == nil { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.NetworkSpec.VPC.ElasticIPPool, "publicIpv4PoolFallbackOrder must be set when publicIpv4Pool is defined.")) + } + awsPublicIpv4PoolPrefix := "ipv4pool-ec2-" + if !strings.HasPrefix(*eipp.PublicIpv4Pool, awsPublicIpv4PoolPrefix) { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4Pool"), r.Spec.NetworkSpec.VPC.ElasticIPPool, fmt.Sprintf("publicIpv4Pool must start with %s.", awsPublicIpv4PoolPrefix))) + } + } + if eipp.PublicIpv4Pool == nil && eipp.PublicIpv4PoolFallBackOrder != nil { + return append(allErrs, field.Invalid(field.NewPath("elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.NetworkSpec.VPC.ElasticIPPool, "publicIpv4Pool must be set when publicIpv4PoolFallbackOrder is defined.")) + } + } + + return allErrs +} + +func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList { + var allErrs field.ErrorList + + // If the secondary is defined, check that the name is not empty and different from the primary. 
+ // Also, ensure that the secondary load balancer is an NLB + if r.Spec.SecondaryControlPlaneLoadBalancer != nil { + if r.Spec.SecondaryControlPlaneLoadBalancer.Name == nil || *r.Spec.SecondaryControlPlaneLoadBalancer.Name == "" { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "name"), r.Spec.SecondaryControlPlaneLoadBalancer.Name, "secondary controlPlaneLoadBalancer.name cannot be empty")) + } + + if r.Spec.SecondaryControlPlaneLoadBalancer.Name == r.Spec.ControlPlaneLoadBalancer.Name { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "name"), r.Spec.SecondaryControlPlaneLoadBalancer.Name, "field must be different from controlPlaneLoadBalancer.name")) + } + + if r.Spec.SecondaryControlPlaneLoadBalancer.Scheme.Equals(r.Spec.ControlPlaneLoadBalancer.Scheme) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "scheme"), r.Spec.SecondaryControlPlaneLoadBalancer.Scheme, "control plane load balancers must have different schemes")) + } + + if r.Spec.SecondaryControlPlaneLoadBalancer.LoadBalancerType != LoadBalancerTypeNLB { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "secondaryControlPlaneLoadBalancer", "loadBalancerType"), r.Spec.SecondaryControlPlaneLoadBalancer.LoadBalancerType, "secondary control plane load balancer must be a Network Load Balancer")) + } + } + + // Additional listeners are only supported for NLBs. + // Validate the control plane load balancers. + loadBalancers := []*AWSLoadBalancerSpec{ + r.Spec.ControlPlaneLoadBalancer, + r.Spec.SecondaryControlPlaneLoadBalancer, + } + for _, cp := range loadBalancers { + if cp == nil { + continue + } + + for _, rule := range cp.IngressRules { + allErrs = append(allErrs, r.validateIngressRule(rule)...) 
+ } + } + + if r.Spec.ControlPlaneLoadBalancer.LoadBalancerType == LoadBalancerTypeDisabled { + if r.Spec.ControlPlaneLoadBalancer.Name != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"), r.Spec.ControlPlaneLoadBalancer.Name, "cannot configure a name if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "crossZoneLoadBalancing"), r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing, "cross-zone load balancing cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.Subnets) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "subnets"), r.Spec.ControlPlaneLoadBalancer.Subnets, "subnets cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"), r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol, "healthcheck protocol cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalSecurityGroups"), r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups, "additional Security Groups cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if len(r.Spec.ControlPlaneLoadBalancer.AdditionalListeners) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalListeners"), r.Spec.ControlPlaneLoadBalancer.AdditionalListeners, "cannot set additional listeners if the LoadBalancer reconciliation is disabled")) + } + + if 
len(r.Spec.ControlPlaneLoadBalancer.IngressRules) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "ingress rules cannot be set if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.PreserveClientIP { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "preserveClientIP"), r.Spec.ControlPlaneLoadBalancer.PreserveClientIP, "cannot preserve client IP if the LoadBalancer reconciliation is disabled")) + } + + if r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "disableHostsRewrite"), r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite, "cannot disable hosts rewrite if the LoadBalancer reconciliation is disabled")) + } + } + + return allErrs +} + +func (r *AWSCluster) validateIngressRule(rule IngressRule) field.ErrorList { + var allErrs field.ErrorList + if rule.NatGatewaysIPsSource { + if rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil || rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("additionalControlPlaneIngressRules"), r.Spec.NetworkSpec.AdditionalControlPlaneIngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) + } + } else { + if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) + } + } + return allErrs +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustercontrolleridentity_webhook.go 
b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustercontrolleridentity_webhook.go new file mode 100644 index 000000000..62724bfbc --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustercontrolleridentity_webhook.go @@ -0,0 +1,104 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var _ = ctrl.Log.WithName("awsclustercontrolleridentity-resource") + +func (r *AWSClusterControllerIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta2,name=validation.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustercontrolleridentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustercontrolleridentities,versions=v1beta2,name=default.awsclustercontrolleridentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var ( + _ webhook.Validator = &AWSClusterControllerIdentity{} + _ webhook.Defaulter = &AWSClusterControllerIdentity{} +) + +// ValidateCreate will do any extra validation when creating an AWSClusterControllerIdentity. +func (r *AWSClusterControllerIdentity) ValidateCreate() (admission.Warnings, error) { + // Ensures AWSClusterControllerIdentity being singleton by only allowing "default" as name + if r.Name != AWSClusterControllerIdentityName { + return nil, field.Invalid(field.NewPath("name"), + r.Name, "AWSClusterControllerIdentity is a singleton and only acceptable name is default") + } + + // Validate selector parses as Selector if AllowedNameSpaces is populated + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// ValidateDelete allows you to add any extra validation when deleting an AWSClusterControllerIdentity. 
+func (r *AWSClusterControllerIdentity) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate will do any extra validation when updating an AWSClusterControllerIdentity. +func (r *AWSClusterControllerIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + oldP, ok := old.(*AWSClusterControllerIdentity) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterControllerIdentity but got a %T", old)) + } + + if !cmp.Equal(r.Spec, oldP.Spec) { + return nil, errors.New("AWSClusterControllerIdentity is immutable") + } + + if r.Name != oldP.Name { + return nil, field.Invalid(field.NewPath("name"), + r.Name, "AWSClusterControllerIdentity is a singleton and only acceptable name is default") + } + + // Validate selector parses as Selector if AllowedNameSpaces is not nil + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selectors"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// Default will set default values for the AWSClusterControllerIdentity. +func (r *AWSClusterControllerIdentity) Default() { + SetDefaults_Labels(&r.ObjectMeta) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterroleidentity_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterroleidentity_webhook.go new file mode 100644 index 000000000..c95622b16 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterroleidentity_webhook.go @@ -0,0 +1,98 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var _ = ctrl.Log.WithName("awsclusterroleidentity-resource") + +func (r *AWSClusterRoleIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta2,name=validation.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterroleidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterroleidentities,versions=v1beta2,name=default.awsclusterroleidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var ( + _ webhook.Validator = &AWSClusterRoleIdentity{} + _ webhook.Defaulter = &AWSClusterRoleIdentity{} +) + +// ValidateCreate will do any extra validation when creating an AWSClusterRoleIdentity. +func (r *AWSClusterRoleIdentity) ValidateCreate() (admission.Warnings, error) { + if r.Spec.SourceIdentityRef == nil { + return nil, field.Invalid(field.NewPath("spec", "sourceIdentityRef"), + r.Spec.SourceIdentityRef, "field cannot be set to nil") + } + + // Validate selector parses as Selector + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// ValidateDelete allows you to add any extra validation when deleting an AWSClusterRoleIdentity. +func (r *AWSClusterRoleIdentity) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate will do any extra validation when updating an AWSClusterRoleIdentity. 
+func (r *AWSClusterRoleIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + oldP, ok := old.(*AWSClusterRoleIdentity) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterRoleIdentity but got a %T", old)) + } + + // If a SourceIdentityRef is set, do not allow removal of it. + if oldP.Spec.SourceIdentityRef != nil && r.Spec.SourceIdentityRef == nil { + return nil, field.Invalid(field.NewPath("spec", "sourceIdentityRef"), + r.Spec.SourceIdentityRef, "field cannot be set to nil") + } + + // Validate selector parses as Selector + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// Default will set default values for the AWSClusterRoleIdentity. +func (r *AWSClusterRoleIdentity) Default() { + SetDefaults_Labels(&r.ObjectMeta) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterstaticidentity_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterstaticidentity_webhook.go new file mode 100644 index 000000000..e98b8dd34 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclusterstaticidentity_webhook.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var _ = ctrl.Log.WithName("awsclusterstaticidentity-resource") + +func (r *AWSClusterStaticIdentity) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta2,name=validation.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclusterstaticidentity,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclusterstaticidentities,versions=v1beta2,name=default.awsclusterstaticidentity.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var ( + _ webhook.Validator = &AWSClusterStaticIdentity{} + _ webhook.Defaulter = &AWSClusterStaticIdentity{} +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
+func (r *AWSClusterStaticIdentity) ValidateCreate() (admission.Warnings, error) { + // Validate selector parses as Selector + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSClusterStaticIdentity) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSClusterStaticIdentity) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + oldP, ok := old.(*AWSClusterStaticIdentity) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an AWSClusterStaticIdentity but got a %T", old)) + } + + if oldP.Spec.SecretRef != r.Spec.SecretRef { + return nil, field.Invalid(field.NewPath("spec", "secretRef"), + r.Spec.SecretRef, "field cannot be updated") + } + + // Validate selector parses as Selector + if r.Spec.AllowedNamespaces != nil { + _, err := metav1.LabelSelectorAsSelector(&r.Spec.AllowedNamespaces.Selector) + if err != nil { + return nil, field.Invalid(field.NewPath("spec", "allowedNamespaces", "selector"), r.Spec.AllowedNamespaces.Selector, err.Error()) + } + } + + return nil, nil +} + +// Default should return the default AWSClusterStaticIdentity. 
+func (r *AWSClusterStaticIdentity) Default() { + SetDefaults_Labels(&r.ObjectMeta) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_types.go new file mode 100644 index 000000000..e0a827fa3 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_types.go @@ -0,0 +1,64 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. +type AWSClusterTemplateSpec struct { + Template AWSClusterTemplateResource `json:"template"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsclustertemplates,scope=Namespaced,categories=cluster-api,shortName=awsct +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of AWSClusterTemplate" +// +k8s:defaulter-gen=true + +// AWSClusterTemplate is the schema for Amazon EC2 based Kubernetes Cluster Templates. 
+type AWSClusterTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AWSClusterTemplateSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// AWSClusterTemplateList contains a list of AWSClusterTemplate. +type AWSClusterTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSClusterTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AWSClusterTemplate{}, &AWSClusterTemplateList{}) +} + +// AWSClusterTemplateResource defines the desired state of AWSClusterTemplateResource. +type AWSClusterTemplateResource struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + Spec AWSClusterSpec `json:"spec"` +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_webhook.go new file mode 100644 index 000000000..95cab6c1c --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsclustertemplate_webhook.go @@ -0,0 +1,69 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "github.com/google/go-cmp/cmp" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func (r *AWSClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta2,name=validation.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsclustertemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsclustertemplates,versions=v1beta2,name=default.awsclustertemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Defaulter = &AWSClusterTemplate{} +var _ webhook.Validator = &AWSClusterTemplate{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (r *AWSClusterTemplate) Default() { + SetObjectDefaults_AWSClusterTemplate(r) +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSClusterTemplate) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.Spec.Template.Spec.Bastion.Validate()...) + allErrs = append(allErrs, validateSSHKeyName(r.Spec.Template.Spec.SSHKeyName)...) 
+ + return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) { + old := oldRaw.(*AWSClusterTemplate) + + if !cmp.Equal(r.Spec, old.Spec) { + return nil, apierrors.NewBadRequest("AWSClusterTemplate.Spec is immutable") + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSClusterTemplate) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsidentity_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsidentity_types.go new file mode 100644 index 000000000..27b56d78d --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsidentity_types.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AWSClusterIdentitySpec defines the Spec struct for AWSClusterIdentity types. +type AWSClusterIdentitySpec struct { + // AllowedNamespaces is used to identify which namespaces are allowed to use the identity from. + // Namespaces can be selected either using an array of namespaces or with label selector. 
+ // An empty allowedNamespaces object indicates that AWSClusters can use this identity from any namespace. + // If this object is nil, no namespaces will be allowed (default behaviour, if this field is not provided) + // A namespace should be either in the NamespaceList or match with Selector to use the identity. + // + // +optional + // +nullable + AllowedNamespaces *AllowedNamespaces `json:"allowedNamespaces"` +} + +// AllowedNamespaces is a selector of namespaces that AWSClusters can +// use this ClusterPrincipal from. This is a standard Kubernetes LabelSelector, +// a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. +type AllowedNamespaces struct { + // An nil or empty list indicates that AWSClusters cannot use the identity from any namespace. + // + // +optional + // +nullable + NamespaceList []string `json:"list"` + + // An empty selector indicates that AWSClusters cannot use this + // AWSClusterIdentity from any namespace. + // +optional + Selector metav1.LabelSelector `json:"selector"` +} + +// AWSRoleSpec defines the specifications for all identities based around AWS roles. +type AWSRoleSpec struct { + // The Amazon Resource Name (ARN) of the role to assume. + RoleArn string `json:"roleARN"` + // An identifier for the assumed role session + SessionName string `json:"sessionName,omitempty"` + // The duration, in seconds, of the role session before it is renewed. + // +kubebuilder:validation:Minimum:=900 + // +kubebuilder:validation:Maximum:=43200 + DurationSeconds int32 `json:"durationSeconds,omitempty"` + // An IAM policy as a JSON-encoded string that you want to use as an inline session policy. + InlinePolicy string `json:"inlinePolicy,omitempty"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. + // The policies must exist in the same account as the role. 
+ PolicyARNs []string `json:"policyARNs,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsclusterstaticidentities,scope=Cluster,categories=cluster-api,shortName=awssi +// +kubebuilder:storageversion +// +k8s:defaulter-gen=true + +// AWSClusterStaticIdentity is the Schema for the awsclusterstaticidentities API +// It represents a reference to an AWS access key ID and secret access key, stored in a secret. +type AWSClusterStaticIdentity struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec for this AWSClusterStaticIdentity + Spec AWSClusterStaticIdentitySpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:defaulter-gen=true + +// AWSClusterStaticIdentityList contains a list of AWSClusterStaticIdentity. +type AWSClusterStaticIdentityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSClusterStaticIdentity `json:"items"` +} + +// AWSClusterStaticIdentitySpec defines the specifications for AWSClusterStaticIdentity. +type AWSClusterStaticIdentitySpec struct { + AWSClusterIdentitySpec `json:",inline"` + // Reference to a secret containing the credentials. The secret should + // contain the following data keys: + // AccessKeyID: AKIAIOSFODNN7EXAMPLE + // SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + // SessionToken: Optional + SecretRef string `json:"secretRef"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsclusterroleidentities,scope=Cluster,categories=cluster-api,shortName=awsri +// +kubebuilder:storageversion +// +k8s:defaulter-gen=true + +// AWSClusterRoleIdentity is the Schema for the awsclusterroleidentities API +// It is used to assume a role using the provided sourceRef. +type AWSClusterRoleIdentity struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec for this AWSClusterRoleIdentity. 
+ Spec AWSClusterRoleIdentitySpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:defaulter-gen=true + +// AWSClusterRoleIdentityList contains a list of AWSClusterRoleIdentity. +type AWSClusterRoleIdentityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSClusterRoleIdentity `json:"items"` +} + +// AWSClusterRoleIdentitySpec defines the specifications for AWSClusterRoleIdentity. +type AWSClusterRoleIdentitySpec struct { + AWSClusterIdentitySpec `json:",inline"` + AWSRoleSpec `json:",inline"` + // A unique identifier that might be required when you assume a role in another account. + // If the administrator of the account to which the role belongs provided you with an + // external ID, then provide that value in the ExternalId parameter. This value can be + // any string, such as a passphrase or account number. A cross-account role is usually + // set up to trust everyone in an account. Therefore, the administrator of the trusting + // account might send an external ID to the administrator of the trusted account. That + // way, only someone with the ID can assume the role, rather than everyone in the + // account. For more information about the external ID, see How to Use an External ID + // When Granting Access to Your AWS Resources to a Third Party in the IAM User Guide. + // +optional + ExternalID string `json:"externalID,omitempty"` + + // SourceIdentityRef is a reference to another identity which will be chained to do + // role assumption. All identity types are accepted. 
+ SourceIdentityRef *AWSIdentityReference `json:"sourceIdentityRef,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsclustercontrolleridentities,scope=Cluster,categories=cluster-api,shortName=awsci +// +kubebuilder:storageversion +// +k8s:defaulter-gen=true + +// AWSClusterControllerIdentity is the Schema for the awsclustercontrolleridentities API +// It is used to grant access to use Cluster API Provider AWS Controller credentials. +type AWSClusterControllerIdentity struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec for this AWSClusterControllerIdentity. + Spec AWSClusterControllerIdentitySpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true +// +k8s:defaulter-gen=true + +// AWSClusterControllerIdentityList contains a list of AWSClusterControllerIdentity. +type AWSClusterControllerIdentityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSClusterControllerIdentity `json:"items"` +} + +// AWSClusterControllerIdentitySpec defines the specifications for AWSClusterControllerIdentity. +type AWSClusterControllerIdentitySpec struct { + AWSClusterIdentitySpec `json:",inline"` +} + +func init() { + SchemeBuilder.Register( + &AWSClusterStaticIdentity{}, + &AWSClusterStaticIdentityList{}, + &AWSClusterRoleIdentity{}, + &AWSClusterRoleIdentityList{}, + &AWSClusterControllerIdentity{}, + &AWSClusterControllerIdentityList{}, + ) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_types.go new file mode 100644 index 000000000..39a649a0e --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_types.go @@ -0,0 +1,422 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" +) + +const ( + // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before + // removing it from the apiserver. + MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io" + + // DefaultIgnitionVersion represents default Ignition version generated for machine userdata. + DefaultIgnitionVersion = "2.3" +) + +// SecretBackend defines variants for backend secret storage. +type SecretBackend string + +var ( + // SecretBackendSSMParameterStore defines AWS Systems Manager Parameter Store as the secret backend. + SecretBackendSSMParameterStore = SecretBackend("ssm-parameter-store") + + // SecretBackendSecretsManager defines AWS Secrets Manager as the secret backend. + SecretBackendSecretsManager = SecretBackend("secrets-manager") +) + +// IgnitionStorageTypeOption defines the different storage types for Ignition. +type IgnitionStorageTypeOption string + +const ( + // IgnitionStorageTypeOptionClusterObjectStore means the chosen Ignition storage type is ClusterObjectStore. + IgnitionStorageTypeOptionClusterObjectStore = IgnitionStorageTypeOption("ClusterObjectStore") + + // IgnitionStorageTypeOptionUnencryptedUserData means the chosen Ignition storage type is UnencryptedUserData. 
+ IgnitionStorageTypeOptionUnencryptedUserData = IgnitionStorageTypeOption("UnencryptedUserData") +) + +// AWSMachineSpec defines the desired state of an Amazon EC2 instance. +type AWSMachineSpec struct { + // ProviderID is the unique identifier as specified by the cloud provider. + ProviderID *string `json:"providerID,omitempty"` + + // InstanceID is the EC2 instance ID for this machine. + InstanceID *string `json:"instanceID,omitempty"` + + // InstanceMetadataOptions is the metadata options for the EC2 instance. + // +optional + InstanceMetadataOptions *InstanceMetadataOptions `json:"instanceMetadataOptions,omitempty"` + + // AMI is the reference to the AMI from which to create the machine instance. + AMI AMIReference `json:"ami,omitempty"` + + // ImageLookupFormat is the AMI naming format to look up the image for this + // machine It will be ignored if an explicit AMI is set. Supports + // substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and + // kubernetes version, respectively. The BaseOS will be the value in + // ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as + // defined by the packages produced by kubernetes/release without v as a + // prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default + // image format of capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-* will end up + // searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a + // Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See + // also: https://golang.org/pkg/text/template/ + // +optional + ImageLookupFormat string `json:"imageLookupFormat,omitempty"` + + // ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set. + ImageLookupOrg string `json:"imageLookupOrg,omitempty"` + + // ImageLookupBaseOS is the name of the base operating system to use for + // image lookup the AMI is not set. 
+ ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"` + + // InstanceType is the type of instance to create. Example: m4.xlarge + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength:=2 + InstanceType string `json:"instanceType"` + + // AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the + // AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the + // AWSMachine's value takes precedence. + // +optional + AdditionalTags Tags `json:"additionalTags,omitempty"` + + // IAMInstanceProfile is a name of an IAM instance profile to assign to the instance + // +optional + IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"` + + // PublicIP specifies whether the instance should get a public IP. + // Precedence for this setting is as follows: + // 1. This field if set + // 2. Cluster/flavor setting + // 3. Subnet default + // +optional + PublicIP *bool `json:"publicIP,omitempty"` + + // ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool. + // + // +optional + ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"` + + // AdditionalSecurityGroups is an array of references to security groups that should be applied to the + // instance. These security groups would be set in addition to any security groups defined + // at the cluster level or in the actuator. It is possible to specify either IDs of Filters. Using Filters + // will cause additional requests to AWS API and if tags change the attached security groups might change too. + // +optional + AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"` + + // Subnet is a reference to the subnet to use for this instance. If not specified, + // the cluster subnet will be used. 
+ // +optional + Subnet *AWSResourceReference `json:"subnet,omitempty"` + + // SecurityGroupOverrides is an optional set of security groups to use for the node. + // This is optional - if not provided security groups from the cluster will be used. + // +optional + SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"` + + // SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name) + // +optional + SSHKeyName *string `json:"sshKeyName,omitempty"` + + // RootVolume encapsulates the configuration options for the root volume + // +optional + RootVolume *Volume `json:"rootVolume,omitempty"` + + // Configuration options for the non root storage volumes. + // +optional + NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"` + + // NetworkInterfaces is a list of ENIs to associate with the instance. + // A maximum of 2 may be specified. + // +optional + // +kubebuilder:validation:MaxItems=2 + NetworkInterfaces []string `json:"networkInterfaces,omitempty"` + + // UncompressedUserData specify whether the user data is gzip-compressed before it is sent to ec2 instance. + // cloud-init has built-in support for gzip-compressed user data + // user data stored in aws secret manager is always gzip-compressed. + // + // +optional + UncompressedUserData *bool `json:"uncompressedUserData,omitempty"` + + // CloudInit defines options related to the bootstrapping systems where + // CloudInit is used. + // +optional + CloudInit CloudInit `json:"cloudInit,omitempty"` + + // Ignition defined options related to the bootstrapping systems where Ignition is used. + // +optional + Ignition *Ignition `json:"ignition,omitempty"` + + // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. 
+ // +optional + SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` + + // PlacementGroupName specifies the name of the placement group in which to launch the instance. + // +optional + PlacementGroupName string `json:"placementGroupName,omitempty"` + + // PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + // This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + // strategy set to partition. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=7 + // +optional + PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"` + + // Tenancy indicates if instance should run on shared or single-tenant hardware. + // +optional + // +kubebuilder:validation:Enum:=default;dedicated;host + Tenancy string `json:"tenancy,omitempty"` + + // PrivateDNSName is the options for the instance hostname. + // +optional + PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"` + + // CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched. + // +optional + CapacityReservationID *string `json:"capacityReservationId,omitempty"` +} + +// CloudInit defines options related to the bootstrapping systems where +// CloudInit is used. +type CloudInit struct { + // InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager + // or AWS Systems Manager Parameter Store to ensure privacy of userdata. + // By default, a cloud-init boothook shell script is prepended to download + // the userdata from Secrets Manager and additionally delete the secret. + InsecureSkipSecretsManager bool `json:"insecureSkipSecretsManager,omitempty"` + + // SecretCount is the number of secrets used to form the complete secret + // +optional + SecretCount int32 `json:"secretCount,omitempty"` + + // SecretPrefix is the prefix for the secret name. 
This is stored + // temporarily, and deleted when the machine registers as a node against + // the workload cluster. + // +optional + SecretPrefix string `json:"secretPrefix,omitempty"` + + // SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager + // Parameter Storage to distribute secrets. By default or with the value of secrets-manager, + // will use AWS Secrets Manager instead. + // +optional + // +kubebuilder:validation:Enum=secrets-manager;ssm-parameter-store + SecureSecretsBackend SecretBackend `json:"secureSecretsBackend,omitempty"` +} + +// Ignition defines options related to the bootstrapping systems where Ignition is used. +// For more information on Ignition configuration, see https://coreos.github.io/butane/specs/ +type Ignition struct { + // Version defines which version of Ignition will be used to generate bootstrap data. + // + // +optional + // +kubebuilder:default="2.3" + // +kubebuilder:validation:Enum="2.3";"3.0";"3.1";"3.2";"3.3";"3.4" + Version string `json:"version,omitempty"` + + // StorageType defines how to store the boostrap user data for Ignition. + // This can be used to instruct Ignition from where to fetch the user data to bootstrap an instance. + // + // When omitted, the storage option will default to ClusterObjectStore. + // + // When set to "ClusterObjectStore", if the capability is available and a Cluster ObjectStore configuration + // is correctly provided in the Cluster object (under .spec.s3Bucket), + // an object store will be used to store bootstrap user data. + // + // When set to "UnencryptedUserData", EC2 Instance User Data will be used to store the machine bootstrap user data, unencrypted. + // This option is considered less secure than others as user data may contain sensitive informations (keys, certificates, etc.) + // and users with ec2:DescribeInstances permission or users running pods + // that can access the ec2 metadata service have access to this sensitive information. 
+ // So this is only to be used at ones own risk, and only when other more secure options are not viable. + // + // +optional + // +kubebuilder:default="ClusterObjectStore" + // +kubebuilder:validation:Enum:="ClusterObjectStore";"UnencryptedUserData" + StorageType IgnitionStorageTypeOption `json:"storageType,omitempty"` + + // Proxy defines proxy settings for Ignition. + // Only valid for Ignition versions 3.1 and above. + // +optional + Proxy *IgnitionProxy `json:"proxy,omitempty"` + + // TLS defines TLS settings for Ignition. + // Only valid for Ignition versions 3.1 and above. + // +optional + TLS *IgnitionTLS `json:"tls,omitempty"` +} + +// IgnitionCASource defines the source of the certificate authority to use for Ignition. +// +kubebuilder:validation:MaxLength:=65536 +type IgnitionCASource string + +// IgnitionTLS defines TLS settings for Ignition. +type IgnitionTLS struct { + // CASources defines the list of certificate authorities to use for Ignition. + // The value is the certificate bundle (in PEM format). The bundle can contain multiple concatenated certificates. + // Supported schemes are http, https, tftp, s3, arn, gs, and `data` (RFC 2397) URL scheme. + // + // +optional + // +kubebuilder:validation:MaxItems=64 + CASources []IgnitionCASource `json:"certificateAuthorities,omitempty"` +} + +// IgnitionNoProxy defines the list of domains to not proxy for Ignition. +// +kubebuilder:validation:MaxLength:=2048 +type IgnitionNoProxy string + +// IgnitionProxy defines proxy settings for Ignition. +type IgnitionProxy struct { + // HTTPProxy is the HTTP proxy to use for Ignition. + // A single URL that specifies the proxy server to use for HTTP and HTTPS requests, + // unless overridden by the HTTPSProxy or NoProxy options. + // +optional + HTTPProxy *string `json:"httpProxy,omitempty"` + + // HTTPSProxy is the HTTPS proxy to use for Ignition. 
+ // A single URL that specifies the proxy server to use for HTTPS requests, + // unless overridden by the NoProxy option. + // +optional + HTTPSProxy *string `json:"httpsProxy,omitempty"` + + // NoProxy is the list of domains to not proxy for Ignition. + // Specifies a list of strings to hosts that should be excluded from proxying. + // + // Each value is represented by: + // - An IP address prefix (1.2.3.4) + // - An IP address prefix in CIDR notation (1.2.3.4/8) + // - A domain name + // - A domain name matches that name and all subdomains + // - A domain name with a leading . matches subdomains only + // - A special DNS label (*), indicates that no proxying should be done + // + // An IP address prefix and domain name can also include a literal port number (1.2.3.4:80). + // +optional + // +kubebuilder:validation:MaxItems=64 + NoProxy []IgnitionNoProxy `json:"noProxy,omitempty"` +} + +// AWSMachineStatus defines the observed state of AWSMachine. +type AWSMachineStatus struct { + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready"` + + // Interruptible reports that this machine is using spot instances and can therefore be interrupted by CAPI when it receives a notice that the spot instance is to be terminated by AWS. + // This will be set to true when SpotMarketOptions is not nil (i.e. this machine is using a spot instance). + // +optional + Interruptible bool `json:"interruptible,omitempty"` + + // Addresses contains the AWS instance associated addresses. + Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + + // InstanceState is the state of the AWS instance for this machine. + // +optional + InstanceState *InstanceState `json:"instanceState,omitempty"` + + // FailureReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. 
+ // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + + // FailureMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + FailureMessage *string `json:"failureMessage,omitempty"` + + // Conditions defines current service state of the AWSMachine. 
+ // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api,shortName=awsm +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs" +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" +// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID" +// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this AWSMachine" +// +k8s:defaulter-gen=true + +// AWSMachine is the schema for Amazon EC2 machines. +type AWSMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AWSMachineSpec `json:"spec,omitempty"` + Status AWSMachineStatus `json:"status,omitempty"` +} + +// GetConditions returns the observations of the operational state of the AWSMachine resource. +func (r *AWSMachine) GetConditions() clusterv1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions. +func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { + r.Status.Conditions = conditions +} + +// +kubebuilder:object:root=true + +// AWSMachineList contains a list of Amazon EC2 machines. 
+type AWSMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSMachine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_webhook.go new file mode 100644 index 000000000..50af4f221 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachine_webhook.go @@ -0,0 +1,423 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "encoding/base64" + "fmt" + "net" + "net/url" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/cluster-api-provider-aws/v2/feature" +) + +// log is for logging in this package. +var log = ctrl.Log.WithName("awsmachine-resource") + +func (r *AWSMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta2,name=validation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachine,mutating=true,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,versions=v1beta2,name=mawsmachine.kb.io,name=mutation.awsmachine.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var ( + _ webhook.Validator = &AWSMachine{} + _ webhook.Defaulter = &AWSMachine{} +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSMachine) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.validateCloudInitSecret()...) + allErrs = append(allErrs, r.validateIgnitionAndCloudInit()...) + allErrs = append(allErrs, r.validateRootVolume()...) + allErrs = append(allErrs, r.validateNonRootVolumes()...) + allErrs = append(allErrs, r.validateSSHKeyName()...) + allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + allErrs = append(allErrs, r.validateNetworkElasticIPPool()...) + + return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
+func (r *AWSMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + newAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r) + if err != nil { + return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{ + field.InternalError(nil, errors.Wrap(err, "failed to convert new AWSMachine to unstructured object")), + }) + } + oldAWSMachine, err := runtime.DefaultUnstructuredConverter.ToUnstructured(old) + if err != nil { + return nil, apierrors.NewInvalid(GroupVersion.WithKind("AWSMachine").GroupKind(), r.Name, field.ErrorList{ + field.InternalError(nil, errors.Wrap(err, "failed to convert old AWSMachine to unstructured object")), + }) + } + + var allErrs field.ErrorList + + allErrs = append(allErrs, r.validateCloudInitSecret()...) + allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) + allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + + newAWSMachineSpec := newAWSMachine["spec"].(map[string]interface{}) + oldAWSMachineSpec := oldAWSMachine["spec"].(map[string]interface{}) + + // allow changes to providerID + delete(oldAWSMachineSpec, "providerID") + delete(newAWSMachineSpec, "providerID") + + // allow changes to instanceID + delete(oldAWSMachineSpec, "instanceID") + delete(newAWSMachineSpec, "instanceID") + + // allow changes to additionalTags + delete(oldAWSMachineSpec, "additionalTags") + delete(newAWSMachineSpec, "additionalTags") + + // allow changes to additionalSecurityGroups + delete(oldAWSMachineSpec, "additionalSecurityGroups") + delete(newAWSMachineSpec, "additionalSecurityGroups") + + // allow changes to secretPrefix, secretCount, and secureSecretsBackend + if cloudInit, ok := oldAWSMachineSpec["cloudInit"].(map[string]interface{}); ok { + delete(cloudInit, "secretPrefix") + delete(cloudInit, "secretCount") + delete(cloudInit, "secureSecretsBackend") + } + + if cloudInit, ok := newAWSMachineSpec["cloudInit"].(map[string]interface{}); ok { + 
delete(cloudInit, "secretPrefix") + delete(cloudInit, "secretCount") + delete(cloudInit, "secureSecretsBackend") + } + + // allow changes to enableResourceNameDNSAAAARecord and enableResourceNameDNSARecord + if privateDNSName, ok := oldAWSMachineSpec["privateDnsName"].(map[string]interface{}); ok { + delete(privateDNSName, "enableResourceNameDnsAAAARecord") + delete(privateDNSName, "enableResourceNameDnsARecord") + } + + if privateDNSName, ok := newAWSMachineSpec["privateDnsName"].(map[string]interface{}); ok { + delete(privateDNSName, "enableResourceNameDnsAAAARecord") + delete(privateDNSName, "enableResourceNameDnsARecord") + } + + if !cmp.Equal(oldAWSMachineSpec, newAWSMachineSpec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "cannot be modified")) + } + + return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) +} + +func (r *AWSMachine) validateCloudInitSecret() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.CloudInit.InsecureSkipSecretsManager { + if r.Spec.CloudInit.SecretPrefix != "" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretPrefix"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true")) + } + if r.Spec.CloudInit.SecretCount != 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true")) + } + if r.Spec.CloudInit.SecureSecretsBackend != "" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secureSecretsBackend"), "cannot be set if spec.cloudInit.insecureSkipSecretsManager is true")) + } + } + + if (r.Spec.CloudInit.SecretPrefix != "") != (r.Spec.CloudInit.SecretCount != 0) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "must be set together with spec.CloudInit.SecretPrefix")) + } + + return allErrs +} + +func (r *AWSMachine) cloudInitConfigured() 
bool { + configured := false + + configured = configured || r.Spec.CloudInit.SecretPrefix != "" + configured = configured || r.Spec.CloudInit.SecretCount != 0 + configured = configured || r.Spec.CloudInit.SecureSecretsBackend != "" + configured = configured || r.Spec.CloudInit.InsecureSkipSecretsManager + + return configured +} + +func (r *AWSMachine) ignitionEnabled() bool { + return r.Spec.Ignition != nil +} + +func (r *AWSMachine) validateIgnitionAndCloudInit() field.ErrorList { + var allErrs field.ErrorList + if !r.ignitionEnabled() { + return allErrs + } + + // Feature gate is not enabled but ignition is enabled then send a forbidden error. + if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition"), + "can be set only if the BootstrapFormatIgnition feature gate is enabled")) + } + + // If ignition is enabled, cloudInit should not be configured. + if r.cloudInitConfigured() { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit"), "cannot be set if spec.ignition is set")) + } + + // Proxy and TLS are only valid for Ignition versions >= 3.1. + if r.Spec.Ignition.Version == "2.3" || r.Spec.Ignition.Version == "3.0" { + if r.Spec.Ignition.Proxy != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "proxy"), "cannot be set if spec.ignition.version is 2.3 or 3.0")) + } + if r.Spec.Ignition.TLS != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "ignition", "tls"), "cannot be set if spec.ignition.version is 2.3 or 3.0")) + } + } + + allErrs = append(allErrs, r.validateIgnitionProxy()...) + allErrs = append(allErrs, r.validateIgnitionTLS()...) + + return allErrs +} + +func (r *AWSMachine) validateIgnitionProxy() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.Ignition.Proxy == nil { + return allErrs + } + + // Validate HTTPProxy. 
+ if r.Spec.Ignition.Proxy.HTTPProxy != nil { + // Parse the url to check if it is valid. + _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPProxy) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpProxy"), *r.Spec.Ignition.Proxy.HTTPProxy, "invalid URL")) + } + } + + // Validate HTTPSProxy. + if r.Spec.Ignition.Proxy.HTTPSProxy != nil { + // Parse the url to check if it is valid. + _, err := url.Parse(*r.Spec.Ignition.Proxy.HTTPSProxy) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "httpsProxy"), *r.Spec.Ignition.Proxy.HTTPSProxy, "invalid URL")) + } + } + + // Validate NoProxy. + for _, noProxy := range r.Spec.Ignition.Proxy.NoProxy { + noProxy := string(noProxy) + // Validate here that the value `noProxy` is: + // - A domain name + // - A domain name matches that name and all subdomains + // - A domain name with a leading . matches subdomains only + + // A special DNS label (*). + if noProxy == "*" { + continue + } + // An IP address prefix (1.2.3.4). + if ip := net.ParseIP(noProxy); ip != nil { + continue + } + // An IP address prefix in CIDR notation (1.2.3.4/8). + if _, _, err := net.ParseCIDR(noProxy); err == nil { + continue + } + // An IP or domain name with a port. + if _, _, err := net.SplitHostPort(noProxy); err == nil { + continue + } + // A domain name. + if noProxy[0] == '.' { + // If it starts with a dot, it should be a domain name. + noProxy = noProxy[1:] + } + // Validate that the value matches DNS 1123. 
+ if errs := validation.IsDNS1123Subdomain(noProxy); len(errs) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "proxy", "noProxy"), noProxy, fmt.Sprintf("invalid noProxy value, please refer to the field documentation: %s", strings.Join(errs, "; ")))) + } + } + + return allErrs +} + +func (r *AWSMachine) validateIgnitionTLS() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.Ignition.TLS == nil { + return allErrs + } + + for _, source := range r.Spec.Ignition.TLS.CASources { + // Validate that source is RFC 2397 data URL. + u, err := url.Parse(string(source)) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid URL")) + } + + switch u.Scheme { + case "http", "https", "tftp", "s3", "arn", "gs": + // Valid schemes. + case "data": + // Validate that the data URL is base64 encoded. + i := strings.Index(u.Opaque, ",") + if i < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid data URL")) + } + // Validate that the data URL is base64 encoded. 
+ if _, err := base64.StdEncoding.DecodeString(u.Opaque[i+1:]); err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "invalid base64 encoding for data url")) + } + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ignition", "tls", "caSources"), source, "unsupported URL scheme")) + } + } + + return allErrs +} + +func (r *AWSMachine) validateRootVolume() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.RootVolume == nil { + return allErrs + } + + if VolumeTypesProvisioned.Has(string(r.Spec.RootVolume.Type)) && r.Spec.RootVolume.IOPS == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.rootVolume.iops"), "iops required if type is 'io1' or 'io2'")) + } + + if r.Spec.RootVolume.Throughput != nil { + if r.Spec.RootVolume.Type != VolumeTypeGP3 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.rootVolume.throughput"), "throughput is valid only for type 'gp3'")) + } + if *r.Spec.RootVolume.Throughput < 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.rootVolume.throughput"), "throughput must be nonnegative")) + } + } + + if r.Spec.RootVolume.DeviceName != "" { + log.Info("root volume shouldn't have a device name (this can be ignored if performing a `clusterctl move`)") + } + + return allErrs +} + +func (r *AWSMachine) validateNetworkElasticIPPool() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.ElasticIPPool == nil { + return allErrs + } + if !ptr.Deref(r.Spec.PublicIP, false) { + allErrs = append(allErrs, field.Required(field.NewPath("spec.elasticIpPool"), "publicIp must be set to 'true' to assign custom public IPv4 pools with elasticIpPool")) + } + eipp := r.Spec.ElasticIPPool + if eipp.PublicIpv4Pool != nil { + if eipp.PublicIpv4PoolFallBackOrder == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.ElasticIPPool, 
"publicIpv4PoolFallbackOrder must be set when publicIpv4Pool is defined.")) + } + awsPublicIpv4PoolPrefix := "ipv4pool-ec2-" + if !strings.HasPrefix(*eipp.PublicIpv4Pool, awsPublicIpv4PoolPrefix) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4Pool"), r.Spec.ElasticIPPool, fmt.Sprintf("publicIpv4Pool must start with %s.", awsPublicIpv4PoolPrefix))) + } + } else if eipp.PublicIpv4PoolFallBackOrder != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.elasticIpPool.publicIpv4PoolFallbackOrder"), r.Spec.ElasticIPPool, "publicIpv4Pool must be set when publicIpv4PoolFallbackOrder is defined.")) + } + + return allErrs +} + +func (r *AWSMachine) validateNonRootVolumes() field.ErrorList { + var allErrs field.ErrorList + + for _, volume := range r.Spec.NonRootVolumes { + if VolumeTypesProvisioned.Has(string(volume.Type)) && volume.IOPS == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'")) + } + + if volume.Throughput != nil { + if volume.Type != VolumeTypeGP3 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.throughput"), "throughput is valid only for type 'gp3'")) + } + if *volume.Throughput < 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.throughput"), "throughput must be nonnegative")) + } + } + + if volume.DeviceName == "" { + allErrs = append(allErrs, field.Required(field.NewPath("spec.nonRootVolumes.deviceName"), "non root volume should have device name")) + } + } + + return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSMachine) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +// Default implements webhook.Defaulter such that an empty CloudInit will be defined with a default +// SecureSecretsBackend as SecretBackendSecretsManager iff InsecureSkipSecretsManager is unset. 
+func (r *AWSMachine) Default() { + if !r.Spec.CloudInit.InsecureSkipSecretsManager && r.Spec.CloudInit.SecureSecretsBackend == "" && !r.ignitionEnabled() { + r.Spec.CloudInit.SecureSecretsBackend = SecretBackendSecretsManager + } + + if r.ignitionEnabled() && r.Spec.Ignition.Version == "" { + if r.Spec.Ignition == nil { + r.Spec.Ignition = &Ignition{} + } + + r.Spec.Ignition.Version = DefaultIgnitionVersion + } +} + +func (r *AWSMachine) validateAdditionalSecurityGroups() field.ErrorList { + var allErrs field.ErrorList + + for _, additionalSecurityGroup := range r.Spec.AdditionalSecurityGroups { + if len(additionalSecurityGroup.Filters) > 0 && additionalSecurityGroup.ID != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.additionalSecurityGroups"), "only one of ID or Filters may be specified, specifying both is forbidden")) + } + } + return allErrs +} + +func (r *AWSMachine) validateSSHKeyName() field.ErrorList { + return validateSSHKeyName(r.Spec.SSHKeyName) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_types.go new file mode 100644 index 000000000..50d8dda22 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_types.go @@ -0,0 +1,76 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. +type AWSMachineTemplateStatus struct { + // Capacity defines the resource capacity for this machine. + // This value is used for autoscaling from zero operations as defined in: + // https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md + // +optional + Capacity corev1.ResourceList `json:"capacity,omitempty"` +} + +// AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate. +type AWSMachineTemplateSpec struct { + Template AWSMachineTemplateResource `json:"template"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt +// +kubebuilder:storageversion +// +k8s:defaulter-gen=true + +// AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates API. +type AWSMachineTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AWSMachineTemplateSpec `json:"spec,omitempty"` + Status AWSMachineTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AWSMachineTemplateList contains a list of AWSMachineTemplate. +type AWSMachineTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSMachineTemplate `json:"items"` +} + +// AWSMachineTemplateResource describes the data needed to create am AWSMachine from a template. +type AWSMachineTemplateResource struct { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + + // Spec is the specification of the desired behavior of the machine. + Spec AWSMachineSpec `json:"spec"` +} + +func init() { + SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_webhook.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_webhook.go new file mode 100644 index 000000000..426a42882 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmachinetemplate_webhook.go @@ -0,0 +1,247 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "sigs.k8s.io/cluster-api-provider-aws/v2/feature" + "sigs.k8s.io/cluster-api/util/topology" +) + +func (r *AWSMachineTemplateWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(&AWSMachineTemplate{}). + WithValidator(r). 
+ Complete() +} + +// AWSMachineTemplateWebhook implements a custom validation webhook for AWSMachineTemplate. +// Note: we use a custom validator to access the request context for SSA of AWSMachineTemplate. +// +kubebuilder:object:generate=false +type AWSMachineTemplateWebhook struct{} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-awsmachinetemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,versions=v1beta2,name=validation.awsmachinetemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.CustomValidator = &AWSMachineTemplateWebhook{} + +func (r *AWSMachineTemplate) validateRootVolume() field.ErrorList { + var allErrs field.ErrorList + + spec := r.Spec.Template.Spec + if spec.RootVolume == nil { + return allErrs + } + + if VolumeTypesProvisioned.Has(string(spec.RootVolume.Type)) && spec.RootVolume.IOPS == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.iops"), "iops required if type is 'io1' or 'io2'")) + } + + if spec.RootVolume.Throughput != nil { + if spec.RootVolume.Type != VolumeTypeGP3 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput is valid only for type 'gp3'")) + } + if *spec.RootVolume.Throughput < 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.rootVolume.throughput"), "throughput must be nonnegative")) + } + } + + if spec.RootVolume.DeviceName != "" { + log.Info("root volume shouldn't have a device name (this can be ignored if performing a `clusterctl move`)") + } + + return allErrs +} + +func (r *AWSMachineTemplate) validateNonRootVolumes() field.ErrorList { + var allErrs field.ErrorList + + spec := r.Spec.Template.Spec + + for _, volume := range spec.NonRootVolumes { + if VolumeTypesProvisioned.Has(string(volume.Type)) 
&& volume.IOPS == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.iops"), "iops required if type is 'io1' or 'io2'")) + } + + if volume.Throughput != nil { + if volume.Type != VolumeTypeGP3 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput is valid only for type 'gp3'")) + } + if *volume.Throughput < 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.throughput"), "throughput must be nonnegative")) + } + } + + if volume.DeviceName == "" { + allErrs = append(allErrs, field.Required(field.NewPath("spec.template.spec.nonRootVolumes.deviceName"), "non root volume should have device name")) + } + } + + return allErrs +} + +func (r *AWSMachineTemplate) validateAdditionalSecurityGroups() field.ErrorList { + var allErrs field.ErrorList + + spec := r.Spec.Template.Spec + + for _, additionalSecurityGroup := range spec.AdditionalSecurityGroups { + if len(additionalSecurityGroup.Filters) > 0 && additionalSecurityGroup.ID != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "additionalSecurityGroups"), "only one of ID or Filters may be specified, specifying both is forbidden")) + } + } + return allErrs +} + +func (r *AWSMachineTemplate) validateCloudInitSecret() field.ErrorList { + var allErrs field.ErrorList + + spec := r.Spec.Template.Spec + if spec.CloudInit.InsecureSkipSecretsManager { + if spec.CloudInit.SecretPrefix != "" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretPrefix"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true")) + } + if spec.CloudInit.SecretCount != 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretCount"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true")) + } + if 
spec.CloudInit.SecureSecretsBackend != "" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secureSecretsBackend"), "cannot be set if spec.template.spec.cloudInit.insecureSkipSecretsManager is true")) + } + } + + if (spec.CloudInit.SecretPrefix != "") != (spec.CloudInit.SecretCount != 0) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretCount"), "must be set together with spec.template.spec.CloudInit.SecretPrefix")) + } + + return allErrs +} + +func (r *AWSMachineTemplate) cloudInitConfigured() bool { + spec := r.Spec.Template.Spec + configured := false + + configured = configured || spec.CloudInit.SecretPrefix != "" + configured = configured || spec.CloudInit.SecretCount != 0 + configured = configured || spec.CloudInit.SecureSecretsBackend != "" + configured = configured || spec.CloudInit.InsecureSkipSecretsManager + + return configured +} + +func (r *AWSMachineTemplate) ignitionEnabled() bool { + return r.Spec.Template.Spec.Ignition != nil +} + +func (r *AWSMachineTemplate) validateIgnitionAndCloudInit() field.ErrorList { + var allErrs field.ErrorList + + // Feature gate is not enabled but ignition is enabled then send a forbidden error. 
+ if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) && r.ignitionEnabled() { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "ignition"), + "can be set only if the BootstrapFormatIgnition feature gate is enabled")) + } + + if r.ignitionEnabled() && r.cloudInitConfigured() { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit"), + "cannot be set if spec.template.spec.ignition is set")) + } + + return allErrs +} +func (r *AWSMachineTemplate) validateSSHKeyName() field.ErrorList { + return validateSSHKeyName(r.Spec.Template.Spec.SSHKeyName) +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSMachineTemplateWebhook) ValidateCreate(_ context.Context, raw runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + obj, ok := raw.(*AWSMachineTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", raw)) + } + + spec := obj.Spec.Template.Spec + + if spec.CloudInit.SecretPrefix != "" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "cloudInit", "secretPrefix"), "cannot be set in templates")) + } + + if spec.CloudInit.SecretCount != 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "cloudInit", "secretCount"), "cannot be set in templates")) + } + + if spec.ProviderID != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "template", "spec", "providerID"), "cannot be set in templates")) + } + + allErrs = append(allErrs, obj.validateCloudInitSecret()...) + allErrs = append(allErrs, obj.validateIgnitionAndCloudInit()...) + allErrs = append(allErrs, obj.validateRootVolume()...) + allErrs = append(allErrs, obj.validateNonRootVolumes()...) + allErrs = append(allErrs, obj.validateSSHKeyName()...) + allErrs = append(allErrs, obj.validateAdditionalSecurityGroups()...) 
+ allErrs = append(allErrs, obj.Spec.Template.Spec.AdditionalTags.Validate()...) + + return nil, aggregateObjErrors(obj.GroupVersionKind().GroupKind(), obj.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. +func (r *AWSMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) (admission.Warnings, error) { + newAWSMachineTemplate, ok := newRaw.(*AWSMachineTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", newRaw)) + } + oldAWSMachineTemplate, ok := oldRaw.(*AWSMachineTemplate) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a AWSMachineTemplate but got a %T", oldRaw)) + } + + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a admission.Request inside context: %v", err)) + } + + var allErrs field.ErrorList + + if !topology.ShouldSkipImmutabilityChecks(req, newAWSMachineTemplate) && !cmp.Equal(newAWSMachineTemplate.Spec, oldAWSMachineTemplate.Spec) { + if oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions == nil { + oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions = newAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions + } + + if !cmp.Equal(newAWSMachineTemplate.Spec.Template.Spec, oldAWSMachineTemplate.Spec.Template.Spec) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "template", "spec"), newAWSMachineTemplate, "AWSMachineTemplate.Spec is immutable"), + ) + } + } + + return nil, aggregateObjErrors(newAWSMachineTemplate.GroupVersionKind().GroupKind(), newAWSMachineTemplate.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
+func (r *AWSMachineTemplateWebhook) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmanagedcluster_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmanagedcluster_types.go new file mode 100644 index 000000000..587ace765 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/awsmanagedcluster_types.go @@ -0,0 +1,71 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// AWSManagedClusterSpec defines the desired state of AWSManagedCluster +type AWSManagedClusterSpec struct { + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` +} + +// AWSManagedClusterStatus defines the observed state of AWSManagedCluster +type AWSManagedClusterStatus struct { + // Ready is when the AWSManagedControlPlane has a API server URL. 
+ // +optional
+ Ready bool `json:"ready,omitempty"`
+
+ // FailureDomains specifies a list of available availability zones that can be used
+ // +optional
+ FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=awsmanagedclusters,scope=Namespaced,categories=cluster-api,shortName=awsmc
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs"
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes"
+// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1
+
+// AWSManagedCluster is the Schema for the awsmanagedclusters API
+type AWSManagedCluster struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AWSManagedClusterSpec `json:"spec,omitempty"`
+ Status AWSManagedClusterStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSManagedClusterList contains a list of AWSManagedCluster.
+type AWSManagedClusterList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSManagedCluster `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{})
+} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/bastion.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/bastion.go new file mode 100644 index 000000000..16c929516 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/bastion.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + "net" + "regexp" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +var ( + sshKeyValidNameRegex = regexp.MustCompile(`^[[:graph:]]+([[:print:]]*[[:graph:]]+)*$`) +) + +// Validate will validate the bastion fields. +func (b *Bastion) Validate() []*field.Error { + var errs field.ErrorList + + if b.DisableIngressRules && len(b.AllowedCIDRBlocks) > 0 { + errs = append(errs, + field.Forbidden(field.NewPath("spec", "bastion", "allowedCIDRBlocks"), "cannot be set if spec.bastion.disableIngressRules is true"), + ) + return errs + } + + for i, cidr := range b.AllowedCIDRBlocks { + if _, _, err := net.ParseCIDR(cidr); err != nil { + errs = append(errs, + field.Invalid(field.NewPath("spec", "bastion", fmt.Sprintf("allowedCIDRBlocks[%d]", i)), cidr, "must be a valid CIDR block"), + ) + } + } + return errs +} + +func validateSSHKeyName(sshKeyName *string) field.ErrorList { + var allErrs field.ErrorList + switch { + case sshKeyName == nil: + // nil is accepted + case sshKeyName != nil && *sshKeyName == "": + // empty string is accepted + case sshKeyName != nil && !sshKeyValidNameRegex.MatchString(*sshKeyName): + allErrs = append(allErrs, field.Invalid(field.NewPath("sshKeyName"), sshKeyName, "Name is invalid. 
Must be specified in ASCII and must not start or end in whitespace")) + } + return allErrs +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conditions_consts.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conditions_consts.go new file mode 100644 index 000000000..604ef8e1d --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conditions_consts.go @@ -0,0 +1,194 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + +const ( + // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. + // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. + PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved" + // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval. + PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed" + // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval. + //nolint:gosec + CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed" + // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace. 
+ PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed" + // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list. + PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized" + // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces + // and allowed namespaces of the identities that source identity depends to. + SourcePrincipalUsageUnauthorizedReason = "SourcePrincipalUsageUnauthorized" +) + +const ( + // VpcReadyCondition reports on the successful reconciliation of a VPC. + VpcReadyCondition clusterv1.ConditionType = "VpcReady" + // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster. + // Will not be applied to unmanaged clusters. + VpcCreationStartedReason = "VpcCreationStarted" + // VpcReconciliationFailedReason used when errors occur during VPC reconciliation. + VpcReconciliationFailedReason = "VpcReconciliationFailed" +) + +const ( + // SubnetsReadyCondition reports on the successful reconciliation of subnets. + SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + // SubnetsReconciliationFailedReason used to report failures while reconciling subnets. + SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed" +) + +const ( + // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways. + // Only applicable to managed clusters. + InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady" + // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation. + InternetGatewayFailedReason = "InternetGatewayFailed" +) + +const ( + // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways. + // Only applicable to managed clusters. 
+ EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady" + // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation. + EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" +) + +const ( + // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways. + // Only applicable to managed clusters. + CarrierGatewayReadyCondition clusterv1.ConditionType = "CarrierGatewayReady" + // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation. + CarrierGatewayFailedReason = "CarrierGatewayFailed" +) + +const ( + // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. + // Only applicable to managed clusters. + NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady" + // NatGatewaysCreationStartedReason set once when creating new NAT gateways. + NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted" + // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways. + NatGatewaysReconciliationFailedReason = "NatGatewaysReconciliationFailed" +) + +const ( + // RouteTablesReadyCondition reports successful reconciliation of route tables. + // Only applicable to managed clusters. + RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables. + RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed" +) + +const ( + // VpcEndpointsReadyCondition reports successful reconciliation of vpc endpoints. + // Only applicable to managed clusters. + VpcEndpointsReadyCondition clusterv1.ConditionType = "VpcEndpointsReadyCondition" + // VpcEndpointsReconciliationFailedReason used when any errors occur during reconciliation of vpc endpoints. 
+ VpcEndpointsReconciliationFailedReason = "VpcEndpointsReconciliationFailed" +) + +const ( + // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks. + // Only applicable to managed clusters. + SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady" + // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks. + SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed" +) + +const ( + // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups. + ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady" + // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups. + ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed" +) + +const ( + // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster + // may not require a bastion host and this condition will be skipped. + BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady" + // BastionCreationStartedReason used when creating a new bastion host. + BastionCreationStartedReason = "BastionCreationStarted" + // BastionHostFailedReason used when an error occurs during the creation of a bastion host. + BastionHostFailedReason = "BastionHostFailed" +) + +const ( + // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled. + LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. + WaitForDNSNameReason = "WaitForDNSName" + // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed + // Load Balancer, such as an external Control Plane provider. 
+ WaitForExternalControlPlaneEndpointReason = "WaitForExternalControlPlaneEndpoint" + // WaitForDNSNameResolveReason used while waiting for DNS name to resolve. + WaitForDNSNameResolveReason = "WaitForDNSNameResolve" + // LoadBalancerFailedReason used when an error occurs during load balancer reconciliation. + LoadBalancerFailedReason = "LoadBalancerFailed" +) + +const ( + // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state. + InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + + // InstanceNotFoundReason used when the instance couldn't be retrieved. + InstanceNotFoundReason = "InstanceNotFound" + // InstanceTerminatedReason instance is in a terminated state. + InstanceTerminatedReason = "InstanceTerminated" + // InstanceStoppedReason instance is in a stopped state. + InstanceStoppedReason = "InstanceStopped" + // InstanceNotReadyReason used when the instance is in a pending state. + InstanceNotReadyReason = "InstanceNotReady" + // InstanceProvisionStartedReason set when the provisioning of an instance started. + InstanceProvisionStartedReason = "InstanceProvisionStarted" + // InstanceProvisionFailedReason used for failures during instance provisioning. + InstanceProvisionFailedReason = "InstanceProvisionFailed" + // WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding. + WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" + // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. + WaitingForBootstrapDataReason = "WaitingForBootstrapData" +) + +const ( + // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine. + SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + + // SecurityGroupsFailedReason used when the security groups could not be synced. 
+ SecurityGroupsFailedReason = "SecurityGroupsSyncFailed" +) + +const ( + // ELBAttachedCondition will report true when a control plane is successfully registered with an ELB. + // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ. + // Note this is only applicable to control plane machines. + // Only applicable to control plane machines. + ELBAttachedCondition clusterv1.ConditionType = "ELBAttached" + + // ELBAttachFailedReason used when a control plane node fails to attach to the ELB. + ELBAttachFailedReason = "ELBAttachFailed" + // ELBDetachFailedReason used when a control plane node fails to detach from an ELB. + ELBDetachFailedReason = "ELBDetachFailed" +) + +const ( + // S3BucketReadyCondition indicates an S3 bucket has been created successfully. + S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated" + + // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket. + S3BucketFailedReason = "S3BucketCreationFailed" +) diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conversion.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conversion.go new file mode 100644 index 000000000..aa1d61728 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/conversion.go @@ -0,0 +1,59 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +// Hub marks AWSCluster as a conversion hub. +func (*AWSCluster) Hub() {} + +// Hub marks AWSClusterList as a conversion hub. +func (*AWSClusterList) Hub() {} + +// Hub marks AWSMachine as a conversion hub. +func (*AWSMachine) Hub() {} + +// Hub marks AWSMachineList as a conversion hub. +func (*AWSMachineList) Hub() {} + +// Hub marks AWSMachineTemplate as a conversion hub. +func (*AWSMachineTemplate) Hub() {} + +// Hub marks AWSMachineTemplateList as a conversion hub. +func (*AWSMachineTemplateList) Hub() {} + +// Hub marks AWSClusterStaticIdentity as a conversion hub. +func (*AWSClusterStaticIdentity) Hub() {} + +// Hub marks AWSClusterStaticIdentityList as a conversion hub. +func (*AWSClusterStaticIdentityList) Hub() {} + +// Hub marks AWSClusterRoleIdentity as a conversion hub. +func (*AWSClusterRoleIdentity) Hub() {} + +// Hub marks AWSClusterRoleIdentityList as a conversion hub. +func (*AWSClusterRoleIdentityList) Hub() {} + +// Hub marks AWSClusterControllerIdentity as a conversion hub. +func (*AWSClusterControllerIdentity) Hub() {} + +// Hub marks AWSClusterControllerIdentityList as a conversion hub. +func (*AWSClusterControllerIdentityList) Hub() {} + +// Hub marks AWSClusterTemplate as a conversion hub. +func (*AWSClusterTemplate) Hub() {} + +// Hub marks AWSClusterTemplateList as a conversion hub. +func (*AWSClusterTemplateList) Hub() {} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/defaults.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/defaults.go new file mode 100644 index 000000000..f10bb895c --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/defaults.go @@ -0,0 +1,97 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" +) + +// SetDefaults_Bastion is used by defaulter-gen. +func SetDefaults_Bastion(obj *Bastion) { //nolint:golint,stylecheck + // Default to allow open access to the bastion host if no CIDR Blocks have been set + if len(obj.AllowedCIDRBlocks) == 0 && !obj.DisableIngressRules { + obj.AllowedCIDRBlocks = []string{"0.0.0.0/0"} + } +} + +// SetDefaults_NetworkSpec is used by defaulter-gen. +func SetDefaults_NetworkSpec(obj *NetworkSpec) { //nolint:golint,stylecheck + // Default to Calico ingress rules if no rules have been set + if obj.CNI == nil { + obj.CNI = &CNISpec{ + CNIIngressRules: CNIIngressRules{ + { + Description: "bgp (calico)", + Protocol: SecurityGroupProtocolTCP, + FromPort: 179, + ToPort: 179, + }, + { + Description: "IP-in-IP (calico)", + Protocol: SecurityGroupProtocolIPinIP, + FromPort: -1, + ToPort: 65535, + }, + }, + } + } +} + +// SetDefaults_AWSClusterSpec is used by defaulter-gen. 
+func SetDefaults_AWSClusterSpec(s *AWSClusterSpec) { //nolint:golint,stylecheck + if s.IdentityRef == nil { + s.IdentityRef = &AWSIdentityReference{ + Kind: ControllerIdentityKind, + Name: AWSClusterControllerIdentityName, + } + } + if s.ControlPlaneLoadBalancer == nil { + s.ControlPlaneLoadBalancer = &AWSLoadBalancerSpec{ + Scheme: &ELBSchemeInternetFacing, + } + } + if s.ControlPlaneLoadBalancer.LoadBalancerType == "" { + s.ControlPlaneLoadBalancer.LoadBalancerType = LoadBalancerTypeClassic + } + if s.SecondaryControlPlaneLoadBalancer != nil { + if s.SecondaryControlPlaneLoadBalancer.LoadBalancerType == "" { + s.SecondaryControlPlaneLoadBalancer.LoadBalancerType = LoadBalancerTypeNLB + } + if s.SecondaryControlPlaneLoadBalancer.Scheme == nil { + s.SecondaryControlPlaneLoadBalancer.Scheme = &ELBSchemeInternal + } + } +} + +// SetDefaults_Labels is used to default cluster scope resources for clusterctl move. +func SetDefaults_Labels(obj *metav1.ObjectMeta) { //nolint:golint,stylecheck + // Defaults to set label if no labels have been set + if obj.Labels == nil { + obj.Labels = map[string]string{ + clusterv1.ClusterctlMoveHierarchyLabel: ""} + } +} + +// SetDefaults_AWSMachineSpec is used by defaulter-gen. +func SetDefaults_AWSMachineSpec(obj *AWSMachineSpec) { //nolint:golint,stylecheck + if obj.InstanceMetadataOptions == nil { + obj.InstanceMetadataOptions = &InstanceMetadataOptions{} + } + obj.InstanceMetadataOptions.SetDefaults() +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/doc.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/doc.go new file mode 100644 index 000000000..4ed8bbddb --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +gencrdrefdocs:force +// +groupName=infrastructure.cluster.x-k8s.io + +// Package v1beta2 contains the v1beta2 API implementation. +package v1beta2 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/groupversion_info.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/groupversion_info.go new file mode 100644 index 000000000..1d921ac08 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the infrastructure v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=infrastructure.cluster.x-k8s.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. 
+ GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/network_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/network_types.go new file mode 100644 index 000000000..2e123cbc3 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/network_types.go @@ -0,0 +1,1105 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "k8s.io/utils/ptr" +) + +const ( + // DefaultAPIServerPort defines the API server port when defining a Load Balancer. + DefaultAPIServerPort = 6443 + // DefaultAPIServerPortString defines the API server port as a string for convenience. + DefaultAPIServerPortString = "6443" + // DefaultAPIServerHealthCheckPath the API server health check path. + DefaultAPIServerHealthCheckPath = "/readyz" + // DefaultAPIServerHealthCheckIntervalSec the API server health check interval in seconds. 
+ DefaultAPIServerHealthCheckIntervalSec = 10 + // DefaultAPIServerHealthCheckTimeoutSec the API server health check timeout in seconds. + DefaultAPIServerHealthCheckTimeoutSec = 5 + // DefaultAPIServerHealthThresholdCount the API server health check threshold count. + DefaultAPIServerHealthThresholdCount = 5 + // DefaultAPIServerUnhealthThresholdCount the API server unhealthy check threshold count. + DefaultAPIServerUnhealthThresholdCount = 3 + + // ZoneTypeAvailabilityZone defines the regular AWS zones in the Region. + ZoneTypeAvailabilityZone ZoneType = "availability-zone" + // ZoneTypeLocalZone defines the AWS zone type in Local Zone infrastructure. + ZoneTypeLocalZone ZoneType = "local-zone" + // ZoneTypeWavelengthZone defines the AWS zone type in Wavelength infrastructure. + ZoneTypeWavelengthZone ZoneType = "wavelength-zone" +) + +// NetworkStatus encapsulates AWS networking resources. +type NetworkStatus struct { + // SecurityGroups is a map from the role/kind of the security group to its unique name, if any. + SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"` + + // APIServerELB is the Kubernetes api server load balancer. + APIServerELB LoadBalancer `json:"apiServerElb,omitempty"` + + // SecondaryAPIServerELB is the secondary Kubernetes api server load balancer. + SecondaryAPIServerELB LoadBalancer `json:"secondaryAPIServerELB,omitempty"` + + // NatGatewaysIPs contains the public IPs of the NAT Gateways + NatGatewaysIPs []string `json:"natGatewaysIPs,omitempty"` +} + +// ELBScheme defines the scheme of a load balancer. +type ELBScheme string + +var ( + // ELBSchemeInternetFacing defines an internet-facing, publicly + // accessible AWS ELB scheme. + ELBSchemeInternetFacing = ELBScheme("internet-facing") + + // ELBSchemeInternal defines an internal-only facing + // load balancer internal to an ELB. 
+ ELBSchemeInternal = ELBScheme("internal") +) + +func (e ELBScheme) String() string { + return string(e) +} + +// Equals returns true if two ELBScheme are equal. +func (e ELBScheme) Equals(other *ELBScheme) bool { + if other == nil { + return false + } + + return e == *other +} + +// ELBProtocol defines listener protocols for a load balancer. +type ELBProtocol string + +func (e ELBProtocol) String() string { + return string(e) +} + +var ( + // ELBProtocolTCP defines the ELB API string representing the TCP protocol. + ELBProtocolTCP = ELBProtocol("TCP") + // ELBProtocolSSL defines the ELB API string representing the TLS protocol. + ELBProtocolSSL = ELBProtocol("SSL") + // ELBProtocolHTTP defines the ELB API string representing the HTTP protocol at L7. + ELBProtocolHTTP = ELBProtocol("HTTP") + // ELBProtocolHTTPS defines the ELB API string representing the HTTP protocol at L7. + ELBProtocolHTTPS = ELBProtocol("HTTPS") + // ELBProtocolTLS defines the NLB API string representing the TLS protocol. + ELBProtocolTLS = ELBProtocol("TLS") + // ELBProtocolUDP defines the NLB API string representing the UDP protocol. + ELBProtocolUDP = ELBProtocol("UDP") +) + +// TargetGroupHealthCheck defines health check settings for the target group. +type TargetGroupHealthCheck struct { + Protocol *string `json:"protocol,omitempty"` + Path *string `json:"path,omitempty"` + Port *string `json:"port,omitempty"` + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` +} + +// TargetGroupHealthCheckAPISpec defines the optional health check settings for the API target group. +type TargetGroupHealthCheckAPISpec struct { + // The approximate amount of time, in seconds, between health checks of an individual + // target. 
+ // +kubebuilder:validation:Minimum=5 + // +kubebuilder:validation:Maximum=300 + // +optional + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + + // The amount of time, in seconds, during which no response from a target means + // a failed health check. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=120 + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // The number of consecutive health check successes required before considering + // a target healthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + + // The number of consecutive health check failures required before considering + // a target unhealthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` +} + +// TargetGroupHealthCheckAdditionalSpec defines the optional health check settings for the additional target groups. +type TargetGroupHealthCheckAdditionalSpec struct { + // The protocol to use to health check connect with the target. When not specified the Protocol + // will be the same of the listener. + // +kubebuilder:validation:Enum=TCP;HTTP;HTTPS + // +optional + Protocol *string `json:"protocol,omitempty"` + + // The port the load balancer uses when performing health checks for additional target groups. When + // not specified this value will be set for the same of listener port. + // +optional + Port *string `json:"port,omitempty"` + + // The destination for health checks on the targets when using the protocol HTTP or HTTPS, + // otherwise the path will be ignored. + // +optional + Path *string `json:"path,omitempty"` + // The approximate amount of time, in seconds, between health checks of an individual + // target. 
+ // +kubebuilder:validation:Minimum=5 + // +kubebuilder:validation:Maximum=300 + // +optional + IntervalSeconds *int64 `json:"intervalSeconds,omitempty"` + + // The amount of time, in seconds, during which no response from a target means + // a failed health check. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=120 + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // The number of consecutive health check successes required before considering + // a target healthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + ThresholdCount *int64 `json:"thresholdCount,omitempty"` + + // The number of consecutive health check failures required before considering + // a target unhealthy. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=10 + // +optional + UnhealthyThresholdCount *int64 `json:"unhealthyThresholdCount,omitempty"` +} + +// TargetGroupAttribute defines attribute key values for V2 Load Balancer Attributes. +type TargetGroupAttribute string + +var ( + // TargetGroupAttributeEnablePreserveClientIP defines the attribute key for enabling preserve client IP. + TargetGroupAttributeEnablePreserveClientIP = "preserve_client_ip.enabled" +) + +// LoadBalancerAttribute defines a set of attributes for a V2 load balancer. +type LoadBalancerAttribute string + +var ( + // LoadBalancerAttributeEnableLoadBalancingCrossZone defines the attribute key for enabling load balancing cross zone. + LoadBalancerAttributeEnableLoadBalancingCrossZone = "load_balancing.cross_zone.enabled" + // LoadBalancerAttributeIdleTimeTimeoutSeconds defines the attribute key for idle timeout. + LoadBalancerAttributeIdleTimeTimeoutSeconds = "idle_timeout.timeout_seconds" + // LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds defines the default idle timeout in seconds. 
+ LoadBalancerAttributeIdleTimeDefaultTimeoutSecondsInSeconds = "60" +) + +// TargetGroupSpec specifies target group settings for a given listener. +// This is created first, and the ARN is then passed to the listener. +type TargetGroupSpec struct { + // Name of the TargetGroup. Must be unique over the same group of listeners. + // +kubebuilder:validation:MaxLength=32 + Name string `json:"name"` + // Port is the exposed port + Port int64 `json:"port"` + // +kubebuilder:validation:Enum=tcp;tls;udp;TCP;TLS;UDP + Protocol ELBProtocol `json:"protocol"` + VpcID string `json:"vpcId"` + // HealthCheck is the elb health check associated with the load balancer. + HealthCheck *TargetGroupHealthCheck `json:"targetGroupHealthCheck,omitempty"` +} + +// Listener defines an AWS network load balancer listener. +type Listener struct { + Protocol ELBProtocol `json:"protocol"` + Port int64 `json:"port"` + TargetGroup TargetGroupSpec `json:"targetGroup"` +} + +// LoadBalancer defines an AWS load balancer. +type LoadBalancer struct { + // ARN of the load balancer. Unlike the ClassicLB, ARN is used mostly + // to define and get it. + ARN string `json:"arn,omitempty"` + // The name of the load balancer. It must be unique within the set of load balancers + // defined in the region. It also serves as identifier. + // +optional + Name string `json:"name,omitempty"` + + // DNSName is the dns name of the load balancer. + DNSName string `json:"dnsName,omitempty"` + + // Scheme is the load balancer scheme, either internet-facing or private. + Scheme ELBScheme `json:"scheme,omitempty"` + + // AvailabilityZones is an array of availability zones in the VPC attached to the load balancer. + AvailabilityZones []string `json:"availabilityZones,omitempty"` + + // SubnetIDs is an array of subnets in the VPC attached to the load balancer. + SubnetIDs []string `json:"subnetIds,omitempty"` + + // SecurityGroupIDs is an array of security groups assigned to the load balancer. 
+ SecurityGroupIDs []string `json:"securityGroupIds,omitempty"` + + // ClassicELBListeners is an array of classic elb listeners associated with the load balancer. There must be at least one. + ClassicELBListeners []ClassicELBListener `json:"listeners,omitempty"` + + // HealthCheck is the classic elb health check associated with the load balancer. + HealthCheck *ClassicELBHealthCheck `json:"healthChecks,omitempty"` + + // ClassicElbAttributes defines extra attributes associated with the load balancer. + ClassicElbAttributes ClassicELBAttributes `json:"attributes,omitempty"` + + // Tags is a map of tags associated with the load balancer. + Tags map[string]string `json:"tags,omitempty"` + + // ELBListeners is an array of listeners associated with the load balancer. There must be at least one. + ELBListeners []Listener `json:"elbListeners,omitempty"` + + // ELBAttributes defines extra attributes associated with v2 load balancers. + ELBAttributes map[string]*string `json:"elbAttributes,omitempty"` + + // LoadBalancerType sets the type for a load balancer. The default type is classic. + // +kubebuilder:validation:Enum:=classic;elb;alb;nlb + LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"` +} + +// IsUnmanaged returns true if the Classic ELB is unmanaged. +func (b *LoadBalancer) IsUnmanaged(clusterName string) bool { + return b.Name != "" && !Tags(b.Tags).HasOwned(clusterName) +} + +// IsManaged returns true if Classic ELB is managed. +func (b *LoadBalancer) IsManaged(clusterName string) bool { + return !b.IsUnmanaged(clusterName) +} + +// ClassicELBAttributes defines extra attributes associated with a classic load balancer. +type ClassicELBAttributes struct { + // IdleTimeout is time that the connection is allowed to be idle (no data + // has been sent over the connection) before it is closed by the load balancer. + IdleTimeout time.Duration `json:"idleTimeout,omitempty"` + + // CrossZoneLoadBalancing enables the classic load balancer load balancing. 
+ // +optional + CrossZoneLoadBalancing bool `json:"crossZoneLoadBalancing,omitempty"` +} + +// ClassicELBListener defines an AWS classic load balancer listener. +type ClassicELBListener struct { + Protocol ELBProtocol `json:"protocol"` + Port int64 `json:"port"` + InstanceProtocol ELBProtocol `json:"instanceProtocol"` + InstancePort int64 `json:"instancePort"` +} + +// ClassicELBHealthCheck defines an AWS classic load balancer health check. +type ClassicELBHealthCheck struct { + Target string `json:"target"` + Interval time.Duration `json:"interval"` + Timeout time.Duration `json:"timeout"` + HealthyThreshold int64 `json:"healthyThreshold"` + UnhealthyThreshold int64 `json:"unhealthyThreshold"` +} + +// NetworkSpec encapsulates all things related to AWS network. +type NetworkSpec struct { + // VPC configuration. + // +optional + VPC VPCSpec `json:"vpc,omitempty"` + + // Subnets configuration. + // +optional + Subnets Subnets `json:"subnets,omitempty"` + + // CNI configuration + // +optional + CNI *CNISpec `json:"cni,omitempty"` + + // SecurityGroupOverrides is an optional set of security groups to use for cluster instances + // This is optional - if not provided new security groups will be created for the cluster + // +optional + SecurityGroupOverrides map[SecurityGroupRole]string `json:"securityGroupOverrides,omitempty"` + + // AdditionalControlPlaneIngressRules is an optional set of ingress rules to add to the control plane + // +optional + AdditionalControlPlaneIngressRules []IngressRule `json:"additionalControlPlaneIngressRules,omitempty"` +} + +// IPv6 contains ipv6 specific settings for the network. +type IPv6 struct { + // CidrBlock is the CIDR block provided by Amazon when VPC has enabled IPv6. + // Mutually exclusive with IPAMPool. + // +optional + CidrBlock string `json:"cidrBlock,omitempty"` + + // PoolID is the IP pool which must be defined in case of BYO IP is defined. + // Must be specified if CidrBlock is set. + // Mutually exclusive with IPAMPool. 
+ // +optional + PoolID string `json:"poolId,omitempty"` + + // EgressOnlyInternetGatewayID is the id of the egress only internet gateway associated with an IPv6 enabled VPC. + // +optional + EgressOnlyInternetGatewayID *string `json:"egressOnlyInternetGatewayId,omitempty"` + + // IPAMPool defines the IPAMv6 pool to be used for VPC. + // Mutually exclusive with CidrBlock. + // +optional + IPAMPool *IPAMPool `json:"ipamPool,omitempty"` +} + +// IPAMPool defines the IPAM pool to be used for VPC. +type IPAMPool struct { + // ID is the ID of the IPAM pool this provider should use to create VPC. + ID string `json:"id,omitempty"` + // Name is the name of the IPAM pool this provider should use to create VPC. + Name string `json:"name,omitempty"` + // The netmask length of the IPv4 CIDR you want to allocate to VPC from + // an Amazon VPC IP Address Manager (IPAM) pool. + // Defaults to /16 for IPv4 if not specified. + NetmaskLength int64 `json:"netmaskLength,omitempty"` +} + +// VPCSpec configures an AWS VPC. +type VPCSpec struct { + // ID is the vpc-id of the VPC this provider should use to create resources. + ID string `json:"id,omitempty"` + + // CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + // Defaults to 10.0.0.0/16. + // Mutually exclusive with IPAMPool. + CidrBlock string `json:"cidrBlock,omitempty"` + + // IPAMPool defines the IPAMv4 pool to be used for VPC. + // Mutually exclusive with CidrBlock. + IPAMPool *IPAMPool `json:"ipamPool,omitempty"` + + // IPv6 contains ipv6 specific settings for the network. Supported only in managed clusters. + // This field cannot be set on AWSCluster object. + // +optional + IPv6 *IPv6 `json:"ipv6,omitempty"` + + // InternetGatewayID is the id of the internet gateway associated with the VPC. + // +optional + InternetGatewayID *string `json:"internetGatewayId,omitempty"` + + // CarrierGatewayID is the id of the internet gateway associated with the VPC, + // for carrier network (Wavelength Zones). 
+ // +optional + // +kubebuilder:validation:XValidation:rule="self.startsWith('cagw-')",message="Carrier Gateway ID must start with 'cagw-'" + CarrierGatewayID *string `json:"carrierGatewayId,omitempty"` + + // Tags is a collection of tags describing the resource. + Tags Tags `json:"tags,omitempty"` + + // AvailabilityZoneUsageLimit specifies the maximum number of availability zones (AZ) that + // should be used in a region when automatically creating subnets. If a region has more + // than this number of AZs then this number of AZs will be picked randomly when creating + // default subnets. Defaults to 3 + // +kubebuilder:default=3 + // +kubebuilder:validation:Minimum=1 + AvailabilityZoneUsageLimit *int `json:"availabilityZoneUsageLimit,omitempty"` + + // AvailabilityZoneSelection specifies how AZs should be selected if there are more AZs + // in a region than specified by AvailabilityZoneUsageLimit. There are 2 selection schemes: + // Ordered - selects based on alphabetical order + // Random - selects AZs randomly in a region + // Defaults to Ordered + // +kubebuilder:default=Ordered + // +kubebuilder:validation:Enum=Ordered;Random + AvailabilityZoneSelection *AZSelectionScheme `json:"availabilityZoneSelection,omitempty"` + + // EmptyRoutesDefaultVPCSecurityGroup specifies whether the default VPC security group ingress + // and egress rules should be removed. + // + // By default, when creating a VPC, AWS creates a security group called `default` with ingress and egress + // rules that allow traffic from anywhere. The group could be used as a potential surface attack and + // it's generally suggested that the group rules are removed or modified appropriately. + // + // NOTE: This only applies when the VPC is managed by the Cluster API AWS controller. 
+ // + // +optional + EmptyRoutesDefaultVPCSecurityGroup bool `json:"emptyRoutesDefaultVPCSecurityGroup,omitempty"` + + // PrivateDNSHostnameTypeOnLaunch is the type of hostname to assign to instances in the subnet at launch. + // For IPv4-only and dual-stack (IPv4 and IPv6) subnets, an instance DNS name can be based on the instance IPv4 address (ip-name) + // or the instance ID (resource-name). For IPv6 only subnets, an instance DNS name must be based on the instance ID (resource-name). + // +optional + // +kubebuilder:validation:Enum:=ip-name;resource-name + PrivateDNSHostnameTypeOnLaunch *string `json:"privateDnsHostnameTypeOnLaunch,omitempty"` + + // ElasticIPPool contains specific configuration to allocate Public IPv4 address (Elastic IP) from user-defined pool + // brought to AWS for core infrastructure resources, like NAT Gateways and Public Network Load Balancers for + // the API Server. + // +optional + ElasticIPPool *ElasticIPPool `json:"elasticIpPool,omitempty"` + + // SubnetSchema specifies how CidrBlock should be divided on subnets in the VPC depending on the number of AZs. + // PreferPrivate - one private subnet for each AZ plus one other subnet that will be further sub-divided for the public subnets. + // PreferPublic - have the reverse logic of PreferPrivate, one public subnet for each AZ plus one other subnet + // that will be further sub-divided for the private subnets. + // Defaults to PreferPrivate + // +optional + // +kubebuilder:default=PreferPrivate + // +kubebuilder:validation:Enum=PreferPrivate;PreferPublic + SubnetSchema *SubnetSchemaType `json:"subnetSchema,omitempty"` +} + +// String returns a string representation of the VPC. +func (v *VPCSpec) String() string { + return fmt.Sprintf("id=%s", v.ID) +} + +// IsUnmanaged returns true if the VPC is unmanaged. +func (v *VPCSpec) IsUnmanaged(clusterName string) bool { + return v.ID != "" && !v.Tags.HasOwned(clusterName) +} + +// IsManaged returns true if VPC is managed. 
+func (v *VPCSpec) IsManaged(clusterName string) bool { + return !v.IsUnmanaged(clusterName) +} + +// IsIPv6Enabled returns true if the IPv6 block is defined on the network spec. +func (v *VPCSpec) IsIPv6Enabled() bool { + return v.IPv6 != nil +} + +// GetElasticIPPool returns the custom Elastic IP Pool configuration when present. +func (v *VPCSpec) GetElasticIPPool() *ElasticIPPool { + return v.ElasticIPPool +} + +// GetPublicIpv4Pool returns the custom public IPv4 pool brought to AWS when present. +func (v *VPCSpec) GetPublicIpv4Pool() *string { + if v.ElasticIPPool == nil { + return nil + } + if v.ElasticIPPool.PublicIpv4Pool != nil { + return v.ElasticIPPool.PublicIpv4Pool + } + return nil +} + +// SubnetSpec configures an AWS Subnet. +type SubnetSpec struct { + // ID defines a unique identifier to reference this resource. + // If you're bringing your subnet, set the AWS subnet-id here, it must start with `subnet-`. + // + // When the VPC is managed by CAPA, and you'd like the provider to create a subnet for you, + // the id can be set to any placeholder value that does not start with `subnet-`; + // upon creation, the subnet AWS identifier will be populated in the `ResourceID` field and + // the `id` field is going to be used as the subnet name. If you specify a tag + // called `Name`, it takes precedence. + ID string `json:"id"` + + // ResourceID is the subnet identifier from AWS, READ ONLY. + // This field is populated when the provider manages the subnet. + // +optional + ResourceID string `json:"resourceID,omitempty"` + + // CidrBlock is the CIDR block to be used when the provider creates a managed VPC. + CidrBlock string `json:"cidrBlock,omitempty"` + + // IPv6CidrBlock is the IPv6 CIDR block to be used when the provider creates a managed VPC. + // A subnet can have an IPv4 and an IPv6 address. + // IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. 
+ // +optional + IPv6CidrBlock string `json:"ipv6CidrBlock,omitempty"` + + // AvailabilityZone defines the availability zone to use for this subnet in the cluster's region. + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // IsPublic defines the subnet as a public subnet. A subnet is public when it is associated with a route table that has a route to an internet gateway. + // +optional + IsPublic bool `json:"isPublic"` + + // IsIPv6 defines the subnet as an IPv6 subnet. A subnet is IPv6 when it is associated with a VPC that has IPv6 enabled. + // IPv6 is only supported in managed clusters, this field cannot be set on AWSCluster object. + // +optional + IsIPv6 bool `json:"isIpv6,omitempty"` + + // RouteTableID is the routing table id associated with the subnet. + // +optional + RouteTableID *string `json:"routeTableId,omitempty"` + + // NatGatewayID is the NAT gateway id associated with the subnet. + // Ignored unless the subnet is managed by the provider, in which case this is set on the public subnet where the NAT gateway resides. It is then used to determine routes for private subnets in the same AZ as the public subnet. + // +optional + NatGatewayID *string `json:"natGatewayId,omitempty"` + + // Tags is a collection of tags describing the resource. + Tags Tags `json:"tags,omitempty"` + + // ZoneType defines the type of the zone where the subnet is created. + // + // The valid values are availability-zone, local-zone, and wavelength-zone. + // + // Subnet with zone type availability-zone (regular) is always selected to create cluster + // resources, like Load Balancers, NAT Gateways, Contol Plane nodes, etc. + // + // Subnet with zone type local-zone or wavelength-zone is not eligible to automatically create + // regular cluster resources. + // + // The public subnet in availability-zone or local-zone is associated with regular public + // route table with default route entry to a Internet Gateway. 
+ // + // The public subnet in wavelength-zone is associated with a carrier public + // route table with default route entry to a Carrier Gateway. + // + // The private subnet in the availability-zone is associated with a private route table with + // the default route entry to a NAT Gateway created in that zone. + // + // The private subnet in the local-zone or wavelength-zone is associated with a private route table with + // the default route entry re-using the NAT Gateway in the Region (preferred from the + // parent zone, the zone type availability-zone in the region, or first table available). + // + // +kubebuilder:validation:Enum=availability-zone;local-zone;wavelength-zone + // +optional + ZoneType *ZoneType `json:"zoneType,omitempty"` + + // ParentZoneName is the zone name where the current subnet's zone is tied when + // the zone is a Local Zone. + // + // The subnets in Local Zone or Wavelength Zone locations consume the ParentZoneName + // to select the correct private route table to egress traffic to the internet. + // + // +optional + ParentZoneName *string `json:"parentZoneName,omitempty"` +} + +// GetResourceID returns the identifier for this subnet, +// if the subnet was not created or reconciled, it returns the subnet ID. +func (s *SubnetSpec) GetResourceID() string { + if s.ResourceID != "" { + return s.ResourceID + } + return s.ID +} + +// String returns a string representation of the subnet. +func (s *SubnetSpec) String() string { + return fmt.Sprintf("id=%s/az=%s/public=%v", s.GetResourceID(), s.AvailabilityZone, s.IsPublic) +} + +// IsEdge returns the true when the subnet is created in the edge zone, +// Local Zones. +func (s *SubnetSpec) IsEdge() bool { + if s.ZoneType == nil { + return false + } + if s.ZoneType.Equal(ZoneTypeLocalZone) { + return true + } + if s.ZoneType.Equal(ZoneTypeWavelengthZone) { + return true + } + return false +} + +// IsEdgeWavelength returns true only when the subnet is created in Wavelength Zone. 
+func (s *SubnetSpec) IsEdgeWavelength() bool { + if s.ZoneType == nil { + return false + } + if *s.ZoneType == ZoneTypeWavelengthZone { + return true + } + return false +} + +// SetZoneInfo updates the subnets with zone information. +func (s *SubnetSpec) SetZoneInfo(zones []*ec2.AvailabilityZone) error { + zoneInfo := func(zoneName string) *ec2.AvailabilityZone { + for _, zone := range zones { + if aws.StringValue(zone.ZoneName) == zoneName { + return zone + } + } + return nil + } + + zone := zoneInfo(s.AvailabilityZone) + if zone == nil { + if len(s.AvailabilityZone) > 0 { + return fmt.Errorf("unable to update zone information for subnet '%v' and zone '%v'", s.ID, s.AvailabilityZone) + } + return fmt.Errorf("unable to update zone information for subnet '%v'", s.ID) + } + if zone.ZoneType != nil { + s.ZoneType = ptr.To(ZoneType(*zone.ZoneType)) + } + if zone.ParentZoneName != nil { + s.ParentZoneName = zone.ParentZoneName + } + return nil +} + +// Subnets is a slice of Subnet. +// +listType=map +// +listMapKey=id +type Subnets []SubnetSpec + +// ToMap returns a map from id to subnet. +func (s Subnets) ToMap() map[string]*SubnetSpec { + res := make(map[string]*SubnetSpec) + for i := range s { + x := s[i] + res[x.GetResourceID()] = &x + } + return res +} + +// IDs returns a slice of the subnet ids. +func (s Subnets) IDs() []string { + res := []string{} + for _, subnet := range s { + // Prevent returning edge zones (Local Zone) to regular Subnet IDs. + // Edge zones should not deploy control plane nodes, and does not support Nat Gateway and + // Network Load Balancers. Any resource for the core infrastructure should not consume edge + // zones. + if subnet.IsEdge() { + continue + } + res = append(res, subnet.GetResourceID()) + } + return res +} + +// IDsWithEdge returns a slice of the subnet ids. 
+func (s Subnets) IDsWithEdge() []string { + res := []string{} + for _, subnet := range s { + res = append(res, subnet.GetResourceID()) + } + return res +} + +// FindByID returns a single subnet matching the given id or nil. +// +// The returned pointer can be used to write back into the original slice. +func (s Subnets) FindByID(id string) *SubnetSpec { + for i := range s { + x := &(s[i]) // pointer to original structure + if x.GetResourceID() == id { + return x + } + } + return nil +} + +// FindEqual returns a subnet spec that is equal to the one passed in. +// Two subnets are defined equal to each other if their id is equal +// or if they are in the same vpc and the cidr block is the same. +// +// The returned pointer can be used to write back into the original slice. +func (s Subnets) FindEqual(spec *SubnetSpec) *SubnetSpec { + for i := range s { + x := &(s[i]) // pointer to original structure + if (spec.GetResourceID() != "" && x.GetResourceID() == spec.GetResourceID()) || + (spec.CidrBlock == x.CidrBlock) || + (spec.IPv6CidrBlock != "" && spec.IPv6CidrBlock == x.IPv6CidrBlock) { + return x + } + } + return nil +} + +// FilterPrivate returns a slice containing all subnets marked as private. +func (s Subnets) FilterPrivate() (res Subnets) { + for _, x := range s { + // Subnets in AWS Local Zones or Wavelength should not be used by core infrastructure. + if x.IsEdge() { + continue + } + if !x.IsPublic { + res = append(res, x) + } + } + return +} + +// FilterNonCni returns the subnets that are NOT intended for usage with the CNI pod network +// (i.e. do NOT have the `sigs.k8s.io/cluster-api-provider-aws/association=secondary` tag). +func (s Subnets) FilterNonCni() (res Subnets) { + for _, x := range s { + if x.Tags[NameAWSSubnetAssociation] != SecondarySubnetTagValue { + res = append(res, x) + } + } + return +} + +// FilterPublic returns a slice containing all subnets marked as public. 
+func (s Subnets) FilterPublic() (res Subnets) { + for _, x := range s { + // Subnets in AWS Local Zones or Wavelength should not be used by core infrastructure. + if x.IsEdge() { + continue + } + if x.IsPublic { + res = append(res, x) + } + } + return +} + +// FilterByZone returns a slice containing all subnets that live in the availability zone specified. +func (s Subnets) FilterByZone(zone string) (res Subnets) { + for _, x := range s { + if x.AvailabilityZone == zone { + res = append(res, x) + } + } + return +} + +// GetUniqueZones returns a slice containing the unique zones of the subnets. +func (s Subnets) GetUniqueZones() []string { + keys := make(map[string]bool) + zones := []string{} + for _, x := range s { + if _, value := keys[x.AvailabilityZone]; len(x.AvailabilityZone) > 0 && !value { + keys[x.AvailabilityZone] = true + zones = append(zones, x.AvailabilityZone) + } + } + return zones +} + +// SetZoneInfo updates the subnets with zone information. +func (s Subnets) SetZoneInfo(zones []*ec2.AvailabilityZone) error { + for i := range s { + if err := s[i].SetZoneInfo(zones); err != nil { + return err + } + } + return nil +} + +// HasPublicSubnetWavelength returns true when there are subnets in Wavelength zone. +func (s Subnets) HasPublicSubnetWavelength() bool { + for _, sub := range s { + if sub.ZoneType == nil { + return false + } + if sub.IsPublic && *sub.ZoneType == ZoneTypeWavelengthZone { + return true + } + } + return false +} + +// CNISpec defines configuration for CNI. +type CNISpec struct { + // CNIIngressRules specify rules to apply to control plane and worker node security groups. + // The source for the rule will be set to control plane and worker security group IDs. + CNIIngressRules CNIIngressRules `json:"cniIngressRules,omitempty"` +} + +// CNIIngressRules is a slice of CNIIngressRule. +type CNIIngressRules []CNIIngressRule + +// CNIIngressRule defines an AWS ingress rule for CNI requirements. 
+type CNIIngressRule struct { + Description string `json:"description"` + Protocol SecurityGroupProtocol `json:"protocol"` + FromPort int64 `json:"fromPort"` + ToPort int64 `json:"toPort"` +} + +// RouteTable defines an AWS routing table. +type RouteTable struct { + ID string `json:"id"` +} + +// SecurityGroupRole defines the unique role of a security group. +// +kubebuilder:validation:Enum=bastion;node;controlplane;apiserver-lb;lb;node-eks-additional +type SecurityGroupRole string + +var ( + // SecurityGroupBastion defines an SSH bastion role. + SecurityGroupBastion = SecurityGroupRole("bastion") + + // SecurityGroupNode defines a Kubernetes workload node role. + SecurityGroupNode = SecurityGroupRole("node") + + // SecurityGroupEKSNodeAdditional defines an extra node group from eks nodes. + SecurityGroupEKSNodeAdditional = SecurityGroupRole("node-eks-additional") + + // SecurityGroupControlPlane defines a Kubernetes control plane node role. + SecurityGroupControlPlane = SecurityGroupRole("controlplane") + + // SecurityGroupAPIServerLB defines a Kubernetes API Server Load Balancer role. + SecurityGroupAPIServerLB = SecurityGroupRole("apiserver-lb") + + // SecurityGroupLB defines a container for the cloud provider to inject its load balancer ingress rules. + SecurityGroupLB = SecurityGroupRole("lb") +) + +// SecurityGroup defines an AWS security group. +type SecurityGroup struct { + // ID is a unique identifier. + ID string `json:"id"` + + // Name is the security group name. + Name string `json:"name"` + + // IngressRules is the inbound rules associated with the security group. + // +optional + IngressRules IngressRules `json:"ingressRule,omitempty"` + + // Tags is a map of tags associated with the security group. + Tags Tags `json:"tags,omitempty"` +} + +// String returns a string representation of the security group. 
+func (s *SecurityGroup) String() string { + return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name) +} + +// SecurityGroupProtocol defines the protocol type for a security group rule. +type SecurityGroupProtocol string + +var ( + // SecurityGroupProtocolAll is a wildcard for all IP protocols. + SecurityGroupProtocolAll = SecurityGroupProtocol("-1") + + // SecurityGroupProtocolIPinIP represents the IP in IP protocol in ingress rules. + SecurityGroupProtocolIPinIP = SecurityGroupProtocol("4") + + // SecurityGroupProtocolTCP represents the TCP protocol in ingress rules. + SecurityGroupProtocolTCP = SecurityGroupProtocol("tcp") + + // SecurityGroupProtocolUDP represents the UDP protocol in ingress rules. + SecurityGroupProtocolUDP = SecurityGroupProtocol("udp") + + // SecurityGroupProtocolICMP represents the ICMP protocol in ingress rules. + SecurityGroupProtocolICMP = SecurityGroupProtocol("icmp") + + // SecurityGroupProtocolICMPv6 represents the ICMPv6 protocol in ingress rules. + SecurityGroupProtocolICMPv6 = SecurityGroupProtocol("58") + + // SecurityGroupProtocolESP represents the ESP protocol in ingress rules. + SecurityGroupProtocolESP = SecurityGroupProtocol("50") +) + +// IngressRule defines an AWS ingress rule for security groups. +type IngressRule struct { + // Description provides extended information about the ingress rule. + Description string `json:"description"` + // Protocol is the protocol for the ingress rule. Accepted values are "-1" (all), "4" (IP in IP),"tcp", "udp", "icmp", and "58" (ICMPv6), "50" (ESP). + // +kubebuilder:validation:Enum="-1";"4";tcp;udp;icmp;"58";"50" + Protocol SecurityGroupProtocol `json:"protocol"` + // FromPort is the start of port range. + FromPort int64 `json:"fromPort"` + // ToPort is the end of port range. + ToPort int64 `json:"toPort"` + + // List of CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID. 
+ // +optional + CidrBlocks []string `json:"cidrBlocks,omitempty"` + + // List of IPv6 CIDR blocks to allow access from. Cannot be specified with SourceSecurityGroupID. + // +optional + IPv6CidrBlocks []string `json:"ipv6CidrBlocks,omitempty"` + + // The security group id to allow access from. Cannot be specified with CidrBlocks. + // +optional + SourceSecurityGroupIDs []string `json:"sourceSecurityGroupIds,omitempty"` + + // The security group role to allow access from. Cannot be specified with CidrBlocks. + // The field will be combined with source security group IDs if specified. + // +optional + SourceSecurityGroupRoles []SecurityGroupRole `json:"sourceSecurityGroupRoles,omitempty"` + + // NatGatewaysIPsSource use the NAT gateways IPs as the source for the ingress rule. + // +optional + NatGatewaysIPsSource bool `json:"natGatewaysIPsSource,omitempty"` +} + +// String returns a string representation of the ingress rule. +func (i IngressRule) String() string { + return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description) +} + +// IngressRules is a slice of AWS ingress rules for security groups. +type IngressRules []IngressRule + +// Difference returns the difference between this slice and the other slice. +func (i IngressRules) Difference(o IngressRules) (out IngressRules) { + for index := range i { + x := i[index] + found := false + for oIndex := range o { + y := o[oIndex] + if x.Equals(&y) { + found = true + break + } + } + + if !found { + out = append(out, x) + } + } + + return +} + +// Equals returns true if two IngressRule are equal. 
+func (i *IngressRule) Equals(o *IngressRule) bool { + // ipv4 + if len(i.CidrBlocks) != len(o.CidrBlocks) { + return false + } + + sort.Strings(i.CidrBlocks) + sort.Strings(o.CidrBlocks) + + for i, v := range i.CidrBlocks { + if v != o.CidrBlocks[i] { + return false + } + } + // ipv6 + if len(i.IPv6CidrBlocks) != len(o.IPv6CidrBlocks) { + return false + } + + sort.Strings(i.IPv6CidrBlocks) + sort.Strings(o.IPv6CidrBlocks) + + for i, v := range i.IPv6CidrBlocks { + if v != o.IPv6CidrBlocks[i] { + return false + } + } + + if len(i.SourceSecurityGroupIDs) != len(o.SourceSecurityGroupIDs) { + return false + } + + sort.Strings(i.SourceSecurityGroupIDs) + sort.Strings(o.SourceSecurityGroupIDs) + + for i, v := range i.SourceSecurityGroupIDs { + if v != o.SourceSecurityGroupIDs[i] { + return false + } + } + + if i.Description != o.Description || i.Protocol != o.Protocol { + return false + } + + // AWS seems to ignore the From/To port when set on protocols where it doesn't apply, but + // we avoid serializing it out for clarity's sake. + // See: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html + switch i.Protocol { + case SecurityGroupProtocolTCP, + SecurityGroupProtocolUDP, + SecurityGroupProtocolICMP, + SecurityGroupProtocolICMPv6: + return i.FromPort == o.FromPort && i.ToPort == o.ToPort + case SecurityGroupProtocolAll, SecurityGroupProtocolIPinIP, SecurityGroupProtocolESP: + // FromPort / ToPort are not applicable + } + + return true +} + +// ZoneType defines listener AWS Availability Zone type. +type ZoneType string + +// String returns the string representation for the zone type. +func (z ZoneType) String() string { + return string(z) +} + +// Equal compares two zone types. +func (z ZoneType) Equal(other ZoneType) bool { + return z == other +} + +// ElasticIPPool allows configuring a Elastic IP pool for resources allocating +// public IPv4 addresses on public subnets. 
+type ElasticIPPool struct { + // PublicIpv4Pool sets a custom Public IPv4 Pool used to create Elastic IP address for resources + // created in public IPv4 subnets. Every IPv4 address, Elastic IP, will be allocated from the custom + // Public IPv4 pool that you brought to AWS, instead of Amazon-provided pool. The public IPv4 pool + // resource ID starts with 'ipv4pool-ec2'. + // + // +kubebuilder:validation:MaxLength=30 + // +optional + PublicIpv4Pool *string `json:"publicIpv4Pool,omitempty"` + + // PublicIpv4PoolFallBackOrder defines the fallback action when the Public IPv4 Pool has been exhausted, + // no more IPv4 address available in the pool. + // + // When set to 'amazon-pool', the controller check if the pool has available IPv4 address, when pool has reached the + // IPv4 limit, the address will be claimed from Amazon-pool (default). + // + // When set to 'none', the controller will fail the Elastic IP allocation when the publicIpv4Pool is exhausted. + // + // +kubebuilder:validation:Enum:=amazon-pool;none + // +optional + PublicIpv4PoolFallBackOrder *PublicIpv4PoolFallbackOrder `json:"publicIpv4PoolFallbackOrder,omitempty"` + + // TODO(mtulio): add future support of user-defined Elastic IP to allow users to assign BYO Public IP from + // 'static'/preallocated amazon-provided IPsstrucute currently holds only 'BYO Public IP from Public IPv4 Pool' (user brought to AWS), + // although a dedicated structure would help to hold 'BYO Elastic IP' variants like: + // - AllocationIdPoolApiLoadBalancer: an user-defined (static) IP address to the Public API Load Balancer. + // - AllocationIdPoolNatGateways: an user-defined (static) IP address to allocate to NAT Gateways (egress traffic). +} + +// PublicIpv4PoolFallbackOrder defines the list of available fallback action when the PublicIpv4Pool is exhausted. +// 'none' let the controllers return failures when the PublicIpv4Pool is exhausted - no more IPv4 available. 
+// 'amazon-pool' let the controllers to skip the PublicIpv4Pool and use the Amazon pool, the default. +// +kubebuilder:validation:XValidation:rule="self in ['none','amazon-pool']",message="allowed values are 'none' and 'amazon-pool'" +type PublicIpv4PoolFallbackOrder string + +const ( + // PublicIpv4PoolFallbackOrderAmazonPool refers to use Amazon-pool Public IPv4 Pool as a fallback strategy. + PublicIpv4PoolFallbackOrderAmazonPool = PublicIpv4PoolFallbackOrder("amazon-pool") + + // PublicIpv4PoolFallbackOrderNone refers to not use any fallback strategy. + PublicIpv4PoolFallbackOrderNone = PublicIpv4PoolFallbackOrder("none") +) + +func (r PublicIpv4PoolFallbackOrder) String() string { + return string(r) +} + +// Equal compares PublicIpv4PoolFallbackOrder types and return true if input param is equal. +func (r PublicIpv4PoolFallbackOrder) Equal(e PublicIpv4PoolFallbackOrder) bool { + return r == e +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/s3bucket.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/s3bucket.go new file mode 100644 index 000000000..777c0a4cf --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/s3bucket.go @@ -0,0 +1,83 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "fmt" + "net" + + "k8s.io/apimachinery/pkg/util/validation/field" + + "sigs.k8s.io/cluster-api-provider-aws/v2/feature" +) + +// Validate validates S3Bucket fields. +func (b *S3Bucket) Validate() []*field.Error { + var errs field.ErrorList + + if b == nil { + return errs + } + + if b.Name == "" { + errs = append(errs, field.Required(field.NewPath("spec", "s3Bucket", "name"), "can't be empty")) + } + + // Feature gate is not enabled but ignition is enabled then send a forbidden error. + if !feature.Gates.Enabled(feature.BootstrapFormatIgnition) { + errs = append(errs, field.Forbidden(field.NewPath("spec", "s3Bucket"), + "can be set only if the BootstrapFormatIgnition feature gate is enabled")) + } + + if b.PresignedURLDuration == nil { + if b.ControlPlaneIAMInstanceProfile == "" { + errs = append(errs, + field.Required(field.NewPath("spec", "s3Bucket", "controlPlaneIAMInstanceProfiles"), "can't be empty")) + } + + if len(b.NodesIAMInstanceProfiles) == 0 { + errs = append(errs, + field.Required(field.NewPath("spec", "s3Bucket", "nodesIAMInstanceProfiles"), "can't be empty")) + } + + for i, iamInstanceProfile := range b.NodesIAMInstanceProfiles { + if iamInstanceProfile == "" { + errs = append(errs, + field.Required(field.NewPath("spec", "s3Bucket", fmt.Sprintf("nodesIAMInstanceProfiles[%d]", i)), "can't be empty")) + } + } + } + + if b.Name != "" { + errs = append(errs, validateS3BucketName(b.Name)...) + } + + return errs +} + +// Validation rules taken from https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html. 
+func validateS3BucketName(name string) []*field.Error { + var errs field.ErrorList + + path := field.NewPath("spec", "s3Bucket", "name") + + if net.ParseIP(name) != nil { + errs = append(errs, field.Invalid(path, name, "must not be formatted as an IP address (for example, 192.168.5.4)")) + } + + return errs +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/tags.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/tags.go new file mode 100644 index 000000000..e6e0ea7e7 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/tags.go @@ -0,0 +1,269 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + "regexp" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// Tags defines a map of tags. +type Tags map[string]string + +// HasOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of this management tooling. +func (t Tags) HasOwned(cluster string) bool { + value, ok := t[ClusterTagKey(cluster)] + return ok && ResourceLifecycle(value) == ResourceLifecycleOwned +} + +// HasAWSCloudProviderOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of the in-tree cloud provider. 
+func (t Tags) HasAWSCloudProviderOwned(cluster string) bool { + value, ok := t[ClusterAWSCloudProviderTagKey(cluster)] + return ok && ResourceLifecycle(value) == ResourceLifecycleOwned +} + +// GetRole returns the Cluster API role for the tagged resource. +func (t Tags) GetRole() string { + return t[NameAWSClusterAPIRole] +} + +// Difference returns the difference between this map of tags and the other map of tags. +// Items are considered equals if key and value are equals. +func (t Tags) Difference(other Tags) Tags { + res := make(Tags, len(t)) + + for key, value := range t { + if otherValue, ok := other[key]; ok && value == otherValue { + continue + } + res[key] = value + } + + return res +} + +// Merge merges in tags from other. If a tag already exists, it is replaced by the tag in other. +func (t Tags) Merge(other Tags) { + for k, v := range other { + t[k] = v + } +} + +// Validate checks if tags are valid for the AWS API/Resources. +// Keys must have at least 1 and max 128 characters. +// Values must be max 256 characters long. +// Keys and Values can only have alphabets, numbers, spaces and _ . : / = + - @ as characters. +// Tag's key cannot have prefix "aws:". +// Max count of User tags for a specific resource can be 50. 
+func (t Tags) Validate() []*field.Error { + // Defines the maximum number of user tags which can be created for a specific resource + const maxUserTagsAllowed = 50 + var errs field.ErrorList + var userTagCount = len(t) + re := regexp.MustCompile(`^[a-zA-Z0-9\s\_\.\:\=\+\-\@\/]*$`) + + for k, v := range t { + if len(k) < 1 { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot be empty"), + ) + } + if len(k) > 128 { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot be longer than 128 characters"), + ) + } + if len(v) > 256 { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), v, "value cannot be longer than 256 characters"), + ) + } + if wrongUserTagNomenclature(k) { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), k, "user created tag's key cannot have prefix aws:"), + ) + } + val := re.MatchString(k) + if !val { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), k, "key cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ ."), + ) + } + val = re.MatchString(v) + if !val { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), v, "value cannot have characters other than alphabets, numbers, spaces and _ . : / = + - @ ."), + ) + } + } + + if userTagCount > maxUserTagsAllowed { + errs = append(errs, + field.Invalid(field.NewPath("spec", "additionalTags"), t, "user created tags cannot be more than 50"), + ) + } + + return errs +} + +// Checks whether the tag created is user tag or not. +func wrongUserTagNomenclature(k string) bool { + return len(k) > 3 && k[0:4] == "aws:" +} + +// ResourceLifecycle configures the lifecycle of a resource. 
+type ResourceLifecycle string + +const ( + // ResourceLifecycleOwned is the value we use when tagging resources to indicate + // that the resource is considered owned and managed by the cluster, + // and in particular that the lifecycle is tied to the lifecycle of the cluster. + ResourceLifecycleOwned = ResourceLifecycle("owned") + + // ResourceLifecycleShared is the value we use when tagging resources to indicate + // that the resource is shared between multiple clusters, and should not be destroyed + // if the cluster is destroyed. + ResourceLifecycleShared = ResourceLifecycle("shared") + + // NameKubernetesAWSCloudProviderPrefix is the tag name used by the cloud provider to logically + // separate independent cluster resources. We use it to identify which resources we expect + // to be permissive about state changes. + // logically independent clusters running in the same AZ. + // The tag key = NameKubernetesAWSCloudProviderPrefix + clusterID + // The tag value is an ownership value. + NameKubernetesAWSCloudProviderPrefix = "kubernetes.io/cluster/" + + // NameAWSProviderPrefix is the tag prefix we use to differentiate + // cluster-api-provider-aws owned components from other tooling that + // uses NameKubernetesClusterPrefix. + NameAWSProviderPrefix = "sigs.k8s.io/cluster-api-provider-aws/" + + // NameAWSProviderOwned is the tag name we use to differentiate + // cluster-api-provider-aws owned components from other tooling that + // uses NameKubernetesClusterPrefix. + NameAWSProviderOwned = NameAWSProviderPrefix + "cluster/" + + // NameAWSClusterAPIRole is the tag name we use to mark roles for resources + // dedicated to this cluster api provider implementation. + NameAWSClusterAPIRole = NameAWSProviderPrefix + "role" + + // NameAWSSubnetAssociation is the tag name we use to mark association for resources + // dedicated to this cluster api provider implementation. 
+ NameAWSSubnetAssociation = NameAWSProviderPrefix + "association" + + // SecondarySubnetTagValue is the secondary subnet tag constant value. + SecondarySubnetTagValue = "secondary" + + // APIServerRoleTagValue describes the value for the apiserver role. + APIServerRoleTagValue = "apiserver" + + // BastionRoleTagValue describes the value for the bastion role. + BastionRoleTagValue = "bastion" + + // CommonRoleTagValue describes the value for the common role. + CommonRoleTagValue = "common" + + // PublicRoleTagValue describes the value for the public role. + PublicRoleTagValue = "public" + + // PrivateRoleTagValue describes the value for the private role. + PrivateRoleTagValue = "private" + + // MachineNameTagKey is the key for machine name. + MachineNameTagKey = "MachineName" + + // LaunchTemplateBootstrapDataSecret is the tag we use to store the `/` + // of the bootstrap secret that was used to create the user data for the latest launch + // template version. + LaunchTemplateBootstrapDataSecret = NameAWSProviderPrefix + "bootstrap-data-secret" +) + +// ClusterTagKey generates the key for resources associated with a cluster. +func ClusterTagKey(name string) string { + return fmt.Sprintf("%s%s", NameAWSProviderOwned, name) +} + +// ClusterAWSCloudProviderTagKey generates the key for resources associated a cluster's AWS cloud provider. +func ClusterAWSCloudProviderTagKey(name string) string { + return fmt.Sprintf("%s%s", NameKubernetesAWSCloudProviderPrefix, name) +} + +// BuildParams is used to build tags around an aws resource. +type BuildParams struct { + // Lifecycle determines the resource lifecycle. + Lifecycle ResourceLifecycle + + // ClusterName is the cluster associated with the resource. + ClusterName string + + // ResourceID is the unique identifier of the resource to be tagged. + ResourceID string + + // Name is the name of the resource, it's applied as the tag "Name" on AWS. 
+ // +optional + Name *string + + // Role is the role associated to the resource. + // +optional + Role *string + + // Any additional tags to be added to the resource. + // +optional + Additional Tags +} + +// WithMachineName tags the namespaced machine name +// The machine name will be tagged with key "MachineName". +func (b BuildParams) WithMachineName(m *clusterv1.Machine) BuildParams { + machineNamespacedName := types.NamespacedName{Namespace: m.Namespace, Name: m.Name} + b.Additional[MachineNameTagKey] = machineNamespacedName.String() + return b +} + +// WithCloudProvider tags the cluster ownership for a resource. +func (b BuildParams) WithCloudProvider(name string) BuildParams { + b.Additional[ClusterAWSCloudProviderTagKey(name)] = string(ResourceLifecycleOwned) + return b +} + +// Build builds tags including the cluster tag and returns them in map form. +func Build(params BuildParams) Tags { + tags := make(Tags) + + // Add the name tag first so that it can be overwritten by a user-provided tag in the `Additional` tags. + if params.Name != nil { + tags["Name"] = *params.Name + } + + for k, v := range params.Additional { + tags[k] = v + } + + if params.ClusterName != "" { + tags[ClusterTagKey(params.ClusterName)] = string(params.Lifecycle) + } + if params.Role != nil { + tags[NameAWSClusterAPIRole] = *params.Role + } + + return tags +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/types.go new file mode 100644 index 000000000..978d5310f --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/types.go @@ -0,0 +1,469 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ( + // PreventDeletionLabel can be used in situations where preventing delation is allowed. The docs + // and the CRD will call this out where its allowed. + PreventDeletionLabel = "aws.cluster.x-k8s.io/prevent-deletion" +) + +// AWSResourceReference is a reference to a specific AWS resource by ID or filters. +// Only one of ID or Filters may be specified. Specifying more than one will result in +// a validation error. +type AWSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // Filters is a set of key/value pairs used to identify a resource + // They are applied according to the rules defined by the AWS API: + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html + // +optional + Filters []Filter `json:"filters,omitempty"` +} + +// AMIReference is a reference to a specific AWS resource by ID, ARN, or filters. +// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in +// a validation error. +type AMIReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // EKSOptimizedLookupType If specified, will look up an EKS Optimized image in SSM Parameter store + // +kubebuilder:validation:Enum:=AmazonLinux;AmazonLinuxGPU + // +optional + EKSOptimizedLookupType *EKSAMILookupType `json:"eksLookupType,omitempty"` +} + +// Filter is a filter used to identify an AWS resource. 
+type Filter struct { + // Name of the filter. Filter names are case-sensitive. + Name string `json:"name"` + + // Values includes one or more filter values. Filter values are case-sensitive. + Values []string `json:"values"` +} + +// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type. +type AWSMachineProviderConditionType string + +// Valid conditions for an AWS machine instance. +const ( + // MachineCreated indicates whether the machine has been created or not. If not, + // it should include a reason and message for the failure. + MachineCreated AWSMachineProviderConditionType = "MachineCreated" +) + +const ( + // ExternalResourceGCAnnotation is the name of an annotation that indicates if + // external resources should be garbage collected for the cluster. + ExternalResourceGCAnnotation = "aws.cluster.x-k8s.io/external-resource-gc" + + // ExternalResourceGCTasksAnnotation is the name of an annotation that indicates what + // external resources tasks should be executed by garbage collector for the cluster. + ExternalResourceGCTasksAnnotation = "aws.cluster.x-k8s.io/external-resource-tasks-gc" +) + +// GCTask defines a task to be executed by the garbage collector. +type GCTask string + +var ( + // GCTaskLoadBalancer defines a task to cleaning up resources for AWS load balancers. + GCTaskLoadBalancer = GCTask("load-balancer") + + // GCTaskTargetGroup defines a task to cleaning up resources for AWS target groups. + GCTaskTargetGroup = GCTask("target-group") + + // GCTaskSecurityGroup defines a task to cleaning up resources for AWS security groups. + GCTaskSecurityGroup = GCTask("security-group") +) + +// AZSelectionScheme defines the scheme of selecting AZs. +type AZSelectionScheme string + +var ( + // AZSelectionSchemeOrdered will select AZs based on alphabetical order. + AZSelectionSchemeOrdered = AZSelectionScheme("Ordered") + + // AZSelectionSchemeRandom will select AZs randomly. 
+ AZSelectionSchemeRandom = AZSelectionScheme("Random") +) + +// InstanceState describes the state of an AWS instance. +type InstanceState string + +var ( + // InstanceStatePending is the string representing an instance in a pending state. + InstanceStatePending = InstanceState("pending") + + // InstanceStateRunning is the string representing an instance in a running state. + InstanceStateRunning = InstanceState("running") + + // InstanceStateShuttingDown is the string representing an instance shutting down. + InstanceStateShuttingDown = InstanceState("shutting-down") + + // InstanceStateTerminated is the string representing an instance that has been terminated. + InstanceStateTerminated = InstanceState("terminated") + + // InstanceStateStopping is the string representing an instance + // that is in the process of being stopped and can be restarted. + InstanceStateStopping = InstanceState("stopping") + + // InstanceStateStopped is the string representing an instance + // that has been stopped and can be restarted. + InstanceStateStopped = InstanceState("stopped") + + // InstanceRunningStates defines the set of states in which an EC2 instance is + // running or going to be running soon. + InstanceRunningStates = sets.NewString( + string(InstanceStatePending), + string(InstanceStateRunning), + ) + + // InstanceOperationalStates defines the set of states in which an EC2 instance is + // or can return to running, and supports all EC2 operations. + InstanceOperationalStates = InstanceRunningStates.Union( + sets.NewString( + string(InstanceStateStopping), + string(InstanceStateStopped), + ), + ) + + // InstanceKnownStates represents all known EC2 instance states. + InstanceKnownStates = InstanceOperationalStates.Union( + sets.NewString( + string(InstanceStateShuttingDown), + string(InstanceStateTerminated), + ), + ) +) + +// Instance describes an AWS instance. +type Instance struct { + ID string `json:"id"` + + // The current state of the instance. 
+ State InstanceState `json:"instanceState,omitempty"` + + // The instance type. + Type string `json:"type,omitempty"` + + // The ID of the subnet of the instance. + SubnetID string `json:"subnetId,omitempty"` + + // The ID of the AMI used to launch the instance. + ImageID string `json:"imageId,omitempty"` + + // The name of the SSH key pair. + SSHKeyName *string `json:"sshKeyName,omitempty"` + + // SecurityGroupIDs are one or more security group IDs this instance belongs to. + SecurityGroupIDs []string `json:"securityGroupIds,omitempty"` + + // UserData is the raw data script passed to the instance which is run upon bootstrap. + // This field must not be base64 encoded and should only be used when running a new instance. + UserData *string `json:"userData,omitempty"` + + // The name of the IAM instance profile associated with the instance, if applicable. + IAMProfile string `json:"iamProfile,omitempty"` + + // Addresses contains the AWS instance associated addresses. + Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + + // The private IPv4 address assigned to the instance. + PrivateIP *string `json:"privateIp,omitempty"` + + // The public IPv4 address assigned to the instance, if applicable. + PublicIP *string `json:"publicIp,omitempty"` + + // Specifies whether enhanced networking with ENA is enabled. + ENASupport *bool `json:"enaSupport,omitempty"` + + // Indicates whether the instance is optimized for Amazon EBS I/O. + EBSOptimized *bool `json:"ebsOptimized,omitempty"` + + // Configuration options for the root storage volume. + // +optional + RootVolume *Volume `json:"rootVolume,omitempty"` + + // Configuration options for the non root storage volumes. + // +optional + NonRootVolumes []Volume `json:"nonRootVolumes,omitempty"` + + // Specifies ENIs attached to instance + NetworkInterfaces []string `json:"networkInterfaces,omitempty"` + + // The tags associated with the instance. 
+ Tags map[string]string `json:"tags,omitempty"` + + // Availability zone of instance + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // SpotMarketOptions option for configuring instances to be run using AWS Spot instances. + SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` + + // PlacementGroupName specifies the name of the placement group in which to launch the instance. + // +optional + PlacementGroupName string `json:"placementGroupName,omitempty"` + + // PlacementGroupPartition is the partition number within the placement group in which to launch the instance. + // This value is only valid if the placement group, referred in `PlacementGroupName`, was created with + // strategy set to partition. + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=7 + // +optional + PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"` + + // Tenancy indicates if instance should run on shared or single-tenant hardware. + // +optional + Tenancy string `json:"tenancy,omitempty"` + + // IDs of the instance's volumes + // +optional + VolumeIDs []string `json:"volumeIDs,omitempty"` + + // InstanceMetadataOptions is the metadata options for the EC2 instance. + // +optional + InstanceMetadataOptions *InstanceMetadataOptions `json:"instanceMetadataOptions,omitempty"` + + // PrivateDNSName is the options for the instance hostname. + // +optional + PrivateDNSName *PrivateDNSName `json:"privateDnsName,omitempty"` + + // PublicIPOnLaunch is the option to associate a public IP on instance launch + // +optional + PublicIPOnLaunch *bool `json:"publicIPOnLaunch,omitempty"` + + // CapacityReservationID specifies the target Capacity Reservation into which the instance should be launched. 
+ // +optional + CapacityReservationID *string `json:"capacityReservationId,omitempty"` +} + +// InstanceMetadataState describes the state of InstanceMetadataOptions.HttpEndpoint and InstanceMetadataOptions.InstanceMetadataTags +type InstanceMetadataState string + +const ( + // InstanceMetadataEndpointStateDisabled represents the disabled state + InstanceMetadataEndpointStateDisabled = InstanceMetadataState("disabled") + + // InstanceMetadataEndpointStateEnabled represents the enabled state + InstanceMetadataEndpointStateEnabled = InstanceMetadataState("enabled") +) + +// HTTPTokensState describes the state of InstanceMetadataOptions.HTTPTokensState +type HTTPTokensState string + +const ( + // HTTPTokensStateOptional represents the optional state + HTTPTokensStateOptional = HTTPTokensState("optional") + + // HTTPTokensStateRequired represents the required state (IMDSv2) + HTTPTokensStateRequired = HTTPTokensState("required") +) + +// InstanceMetadataOptions describes metadata options for the EC2 instance. +type InstanceMetadataOptions struct { + // Enables or disables the HTTP metadata endpoint on your instances. + // + // If you specify a value of disabled, you cannot access your instance metadata. + // + // Default: enabled + // + // +kubebuilder:validation:Enum:=enabled;disabled + // +kubebuilder:default=enabled + HTTPEndpoint InstanceMetadataState `json:"httpEndpoint,omitempty"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=64 + // +kubebuilder:default=1 + HTTPPutResponseHopLimit int64 `json:"httpPutResponseHopLimit,omitempty"` + + // The state of token usage for your instance metadata requests. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a session token on your request. 
If you retrieve the IAM role + // credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid session token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a session token with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + // + // Default: optional + // + // +kubebuilder:validation:Enum:=optional;required + // +kubebuilder:default=optional + HTTPTokens HTTPTokensState `json:"httpTokens,omitempty"` + + // Set to enabled to allow access to instance tags from the instance metadata. + // Set to disabled to turn off access to instance tags from the instance metadata. + // For more information, see Work with instance tags using the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS). + // + // Default: disabled + // + // +kubebuilder:validation:Enum:=enabled;disabled + // +kubebuilder:default=disabled + InstanceMetadataTags InstanceMetadataState `json:"instanceMetadataTags,omitempty"` +} + +// SetDefaults sets the default values for the InstanceMetadataOptions. +func (obj *InstanceMetadataOptions) SetDefaults() { + if obj.HTTPEndpoint == "" { + obj.HTTPEndpoint = InstanceMetadataEndpointStateEnabled + } + if obj.HTTPPutResponseHopLimit == 0 { + obj.HTTPPutResponseHopLimit = 1 + } + if obj.HTTPTokens == "" { + obj.HTTPTokens = HTTPTokensStateOptional // Defaults to IMDSv1 + } + if obj.InstanceMetadataTags == "" { + obj.InstanceMetadataTags = InstanceMetadataEndpointStateDisabled + } +} + +// Volume encapsulates the configuration options for the storage device. +type Volume struct { + // Device name + // +optional + DeviceName string `json:"deviceName,omitempty"` + + // Size specifies size (in Gi) of the storage device. 
+ // Must be greater than the image snapshot size or 8 (whichever is greater). + // +kubebuilder:validation:Minimum=8 + Size int64 `json:"size"` + + // Type is the type of the volume (e.g. gp2, io1, etc...). + // +optional + Type VolumeType `json:"type,omitempty"` + + // IOPS is the number of IOPS requested for the disk. Not applicable to all types. + // +optional + IOPS int64 `json:"iops,omitempty"` + + // Throughput to provision in MiB/s supported for the volume type. Not applicable to all types. + // +optional + Throughput *int64 `json:"throughput,omitempty"` + + // Encrypted is whether the volume should be encrypted or not. + // +optional + Encrypted *bool `json:"encrypted,omitempty"` + + // EncryptionKey is the KMS key to use to encrypt the volume. Can be either a KMS key ID or ARN. + // If Encrypted is set and this is omitted, the default AWS key will be used. + // The key must already exist and be accessible by the controller. + // +optional + EncryptionKey string `json:"encryptionKey,omitempty"` +} + +// VolumeType describes the EBS volume type. +// See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html +type VolumeType string + +var ( + // VolumeTypeIO1 is the string representing a provisioned iops ssd io1 volume. + VolumeTypeIO1 = VolumeType("io1") + + // VolumeTypeIO2 is the string representing a provisioned iops ssd io2 volume. + VolumeTypeIO2 = VolumeType("io2") + + // VolumeTypeGP2 is the string representing a general purpose ssd gp2 volume. + VolumeTypeGP2 = VolumeType("gp2") + + // VolumeTypeGP3 is the string representing a general purpose ssd gp3 volume. + VolumeTypeGP3 = VolumeType("gp3") + + // VolumeTypesGP are volume types provisioned for general purpose io. + VolumeTypesGP = sets.NewString( + string(VolumeTypeIO1), + string(VolumeTypeIO2), + ) + + // VolumeTypesProvisioned are volume types provisioned for high performance io. 
+ VolumeTypesProvisioned = sets.NewString( + string(VolumeTypeIO1), + string(VolumeTypeIO2), + ) +) + +// SpotMarketOptions defines the options available to a user when configuring +// Machines to run on Spot instances. +// Most users should provide an empty struct. +type SpotMarketOptions struct { + // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // +optional + // +kubebuilder:validation:pattern="^[0-9]+(\.[0-9]+)?$" + MaxPrice *string `json:"maxPrice,omitempty"` +} + +// EKSAMILookupType specifies which AWS AMI to use for a AWSMachine and AWSMachinePool. +type EKSAMILookupType string + +const ( + // AmazonLinux is the default AMI type. + AmazonLinux EKSAMILookupType = "AmazonLinux" + // AmazonLinuxGPU is the AmazonLinux GPU AMI type. + AmazonLinuxGPU EKSAMILookupType = "AmazonLinuxGPU" +) + +// PrivateDNSName is the options for the instance hostname. +type PrivateDNSName struct { + // EnableResourceNameDNSAAAARecord indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. + // +optional + EnableResourceNameDNSAAAARecord *bool `json:"enableResourceNameDnsAAAARecord,omitempty"` + // EnableResourceNameDNSARecord indicates whether to respond to DNS queries for instance hostnames with DNS A records. + // +optional + EnableResourceNameDNSARecord *bool `json:"enableResourceNameDnsARecord,omitempty"` + // The type of hostname to assign to an instance. + // +optional + // +kubebuilder:validation:Enum:=ip-name;resource-name + HostnameType *string `json:"hostnameType,omitempty"` +} + +// SubnetSchemaType specifies how given network should be divided on subnets +// in the VPC depending on the number of AZs. +type SubnetSchemaType string + +// Name returns subnet schema type name without prefix. +func (s *SubnetSchemaType) Name() string { + return strings.ToLower(strings.TrimPrefix(string(*s), "Prefer")) +} + +var ( + // SubnetSchemaPreferPrivate allocates more subnets in the VPC to private subnets. 
+ SubnetSchemaPreferPrivate = SubnetSchemaType("PreferPrivate") + // SubnetSchemaPreferPublic allocates more subnets in the VPC to public subnets. + SubnetSchemaPreferPublic = SubnetSchemaType("PreferPublic") +) diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/webhooks.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/webhooks.go new file mode 100644 index 000000000..b5e444b76 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/webhooks.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func aggregateObjErrors(gk schema.GroupKind, name string, allErrs field.ErrorList) error { + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid( + gk, + name, + allErrs, + ) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 000000000..01a210366 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,2245 @@ +//go:build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/errors" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AMIReference) DeepCopyInto(out *AMIReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.EKSOptimizedLookupType != nil { + in, out := &in.EKSOptimizedLookupType, &out.EKSOptimizedLookupType + *out = new(EKSAMILookupType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMIReference. +func (in *AMIReference) DeepCopy() *AMIReference { + if in == nil { + return nil + } + out := new(AMIReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCluster) DeepCopyInto(out *AWSCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCluster. 
+func (in *AWSCluster) DeepCopy() *AWSCluster { + if in == nil { + return nil + } + out := new(AWSCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterControllerIdentity) DeepCopyInto(out *AWSClusterControllerIdentity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentity. +func (in *AWSClusterControllerIdentity) DeepCopy() *AWSClusterControllerIdentity { + if in == nil { + return nil + } + out := new(AWSClusterControllerIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterControllerIdentity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterControllerIdentityList) DeepCopyInto(out *AWSClusterControllerIdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSClusterControllerIdentity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentityList. 
+func (in *AWSClusterControllerIdentityList) DeepCopy() *AWSClusterControllerIdentityList { + if in == nil { + return nil + } + out := new(AWSClusterControllerIdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterControllerIdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterControllerIdentitySpec) DeepCopyInto(out *AWSClusterControllerIdentitySpec) { + *out = *in + in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterControllerIdentitySpec. +func (in *AWSClusterControllerIdentitySpec) DeepCopy() *AWSClusterControllerIdentitySpec { + if in == nil { + return nil + } + out := new(AWSClusterControllerIdentitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterIdentitySpec) DeepCopyInto(out *AWSClusterIdentitySpec) { + *out = *in + if in.AllowedNamespaces != nil { + in, out := &in.AllowedNamespaces, &out.AllowedNamespaces + *out = new(AllowedNamespaces) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterIdentitySpec. +func (in *AWSClusterIdentitySpec) DeepCopy() *AWSClusterIdentitySpec { + if in == nil { + return nil + } + out := new(AWSClusterIdentitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSClusterList) DeepCopyInto(out *AWSClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterList. +func (in *AWSClusterList) DeepCopy() *AWSClusterList { + if in == nil { + return nil + } + out := new(AWSClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterRoleIdentity) DeepCopyInto(out *AWSClusterRoleIdentity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentity. +func (in *AWSClusterRoleIdentity) DeepCopy() *AWSClusterRoleIdentity { + if in == nil { + return nil + } + out := new(AWSClusterRoleIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterRoleIdentity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSClusterRoleIdentityList) DeepCopyInto(out *AWSClusterRoleIdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSClusterRoleIdentity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentityList. +func (in *AWSClusterRoleIdentityList) DeepCopy() *AWSClusterRoleIdentityList { + if in == nil { + return nil + } + out := new(AWSClusterRoleIdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterRoleIdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterRoleIdentitySpec) DeepCopyInto(out *AWSClusterRoleIdentitySpec) { + *out = *in + in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec) + in.AWSRoleSpec.DeepCopyInto(&out.AWSRoleSpec) + if in.SourceIdentityRef != nil { + in, out := &in.SourceIdentityRef, &out.SourceIdentityRef + *out = new(AWSIdentityReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterRoleIdentitySpec. +func (in *AWSClusterRoleIdentitySpec) DeepCopy() *AWSClusterRoleIdentitySpec { + if in == nil { + return nil + } + out := new(AWSClusterRoleIdentitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSClusterSpec) DeepCopyInto(out *AWSClusterSpec) { + *out = *in + in.NetworkSpec.DeepCopyInto(&out.NetworkSpec) + if in.SSHKeyName != nil { + in, out := &in.SSHKeyName, &out.SSHKeyName + *out = new(string) + **out = **in + } + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControlPlaneLoadBalancer != nil { + in, out := &in.ControlPlaneLoadBalancer, &out.ControlPlaneLoadBalancer + *out = new(AWSLoadBalancerSpec) + (*in).DeepCopyInto(*out) + } + if in.SecondaryControlPlaneLoadBalancer != nil { + in, out := &in.SecondaryControlPlaneLoadBalancer, &out.SecondaryControlPlaneLoadBalancer + *out = new(AWSLoadBalancerSpec) + (*in).DeepCopyInto(*out) + } + in.Bastion.DeepCopyInto(&out.Bastion) + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(AWSIdentityReference) + **out = **in + } + if in.S3Bucket != nil { + in, out := &in.S3Bucket, &out.S3Bucket + *out = new(S3Bucket) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterSpec. +func (in *AWSClusterSpec) DeepCopy() *AWSClusterSpec { + if in == nil { + return nil + } + out := new(AWSClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterStaticIdentity) DeepCopyInto(out *AWSClusterStaticIdentity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentity. 
+func (in *AWSClusterStaticIdentity) DeepCopy() *AWSClusterStaticIdentity { + if in == nil { + return nil + } + out := new(AWSClusterStaticIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterStaticIdentity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterStaticIdentityList) DeepCopyInto(out *AWSClusterStaticIdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSClusterStaticIdentity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentityList. +func (in *AWSClusterStaticIdentityList) DeepCopy() *AWSClusterStaticIdentityList { + if in == nil { + return nil + } + out := new(AWSClusterStaticIdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterStaticIdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterStaticIdentitySpec) DeepCopyInto(out *AWSClusterStaticIdentitySpec) { + *out = *in + in.AWSClusterIdentitySpec.DeepCopyInto(&out.AWSClusterIdentitySpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStaticIdentitySpec. 
+func (in *AWSClusterStaticIdentitySpec) DeepCopy() *AWSClusterStaticIdentitySpec { + if in == nil { + return nil + } + out := new(AWSClusterStaticIdentitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { + *out = *in + in.Network.DeepCopyInto(&out.Network) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(v1beta1.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Bastion != nil { + in, out := &in.Bastion, &out.Bastion + *out = new(Instance) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterStatus. +func (in *AWSClusterStatus) DeepCopy() *AWSClusterStatus { + if in == nil { + return nil + } + out := new(AWSClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterTemplate) DeepCopyInto(out *AWSClusterTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterTemplate. +func (in *AWSClusterTemplate) DeepCopy() *AWSClusterTemplate { + if in == nil { + return nil + } + out := new(AWSClusterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AWSClusterTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterTemplateList) DeepCopyInto(out *AWSClusterTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSClusterTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterTemplateList. +func (in *AWSClusterTemplateList) DeepCopy() *AWSClusterTemplateList { + if in == nil { + return nil + } + out := new(AWSClusterTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSClusterTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClusterTemplateResource) DeepCopyInto(out *AWSClusterTemplateResource) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterTemplateResource. +func (in *AWSClusterTemplateResource) DeepCopy() *AWSClusterTemplateResource { + if in == nil { + return nil + } + out := new(AWSClusterTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSClusterTemplateSpec) DeepCopyInto(out *AWSClusterTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClusterTemplateSpec. +func (in *AWSClusterTemplateSpec) DeepCopy() *AWSClusterTemplateSpec { + if in == nil { + return nil + } + out := new(AWSClusterTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSIdentityReference) DeepCopyInto(out *AWSIdentityReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIdentityReference. +func (in *AWSIdentityReference) DeepCopy() *AWSIdentityReference { + if in == nil { + return nil + } + out := new(AWSIdentityReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSLoadBalancerSpec) DeepCopyInto(out *AWSLoadBalancerSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(ELBScheme) + **out = **in + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.HealthCheckProtocol != nil { + in, out := &in.HealthCheckProtocol, &out.HealthCheckProtocol + *out = new(ELBProtocol) + **out = **in + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TargetGroupHealthCheckAPISpec) + (*in).DeepCopyInto(*out) + } + if in.AdditionalSecurityGroups != nil { + in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalListeners != nil { + in, out := &in.AdditionalListeners, &out.AdditionalListeners + *out = make([]AdditionalListenerSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.IngressRules != nil { + in, out := &in.IngressRules, &out.IngressRules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerSpec. +func (in *AWSLoadBalancerSpec) DeepCopy() *AWSLoadBalancerSpec { + if in == nil { + return nil + } + out := new(AWSLoadBalancerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachine) DeepCopyInto(out *AWSMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachine. +func (in *AWSMachine) DeepCopy() *AWSMachine { + if in == nil { + return nil + } + out := new(AWSMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineList) DeepCopyInto(out *AWSMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineList. +func (in *AWSMachineList) DeepCopy() *AWSMachineList { + if in == nil { + return nil + } + out := new(AWSMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { + *out = *in + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceMetadataOptions != nil { + in, out := &in.InstanceMetadataOptions, &out.InstanceMetadataOptions + *out = new(InstanceMetadataOptions) + **out = **in + } + in.AMI.DeepCopyInto(&out.AMI) + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.ElasticIPPool != nil { + in, out := &in.ElasticIPPool, &out.ElasticIPPool + *out = new(ElasticIPPool) + (*in).DeepCopyInto(*out) + } + if in.AdditionalSecurityGroups != nil { + in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups + *out = make([]AWSResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupOverrides != nil { + in, out := &in.SecurityGroupOverrides, &out.SecurityGroupOverrides + *out = make(map[SecurityGroupRole]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SSHKeyName != nil { + in, out := &in.SSHKeyName, &out.SSHKeyName + *out = new(string) + **out = **in + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.NonRootVolumes != nil { + in, out := &in.NonRootVolumes, &out.NonRootVolumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, 
&out.NetworkInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UncompressedUserData != nil { + in, out := &in.UncompressedUserData, &out.UncompressedUserData + *out = new(bool) + **out = **in + } + out.CloudInit = in.CloudInit + if in.Ignition != nil { + in, out := &in.Ignition, &out.Ignition + *out = new(Ignition) + (*in).DeepCopyInto(*out) + } + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + if in.PrivateDNSName != nil { + in, out := &in.PrivateDNSName, &out.PrivateDNSName + *out = new(PrivateDNSName) + (*in).DeepCopyInto(*out) + } + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec. +func (in *AWSMachineSpec) DeepCopy() *AWSMachineSpec { + if in == nil { + return nil + } + out := new(AWSMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1beta1.MachineAddress, len(*in)) + copy(*out, *in) + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(InstanceState) + **out = **in + } + if in.FailureReason != nil { + in, out := &in.FailureReason, &out.FailureReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.FailureMessage != nil { + in, out := &in.FailureMessage, &out.FailureMessage + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineStatus. +func (in *AWSMachineStatus) DeepCopy() *AWSMachineStatus { + if in == nil { + return nil + } + out := new(AWSMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineTemplate) DeepCopyInto(out *AWSMachineTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplate. +func (in *AWSMachineTemplate) DeepCopy() *AWSMachineTemplate { + if in == nil { + return nil + } + out := new(AWSMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AWSMachineTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineTemplateList) DeepCopyInto(out *AWSMachineTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSMachineTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateList. +func (in *AWSMachineTemplateList) DeepCopy() *AWSMachineTemplateList { + if in == nil { + return nil + } + out := new(AWSMachineTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachineTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineTemplateResource) DeepCopyInto(out *AWSMachineTemplateResource) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateResource. +func (in *AWSMachineTemplateResource) DeepCopy() *AWSMachineTemplateResource { + if in == nil { + return nil + } + out := new(AWSMachineTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineTemplateSpec) DeepCopyInto(out *AWSMachineTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateSpec. +func (in *AWSMachineTemplateSpec) DeepCopy() *AWSMachineTemplateSpec { + if in == nil { + return nil + } + out := new(AWSMachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineTemplateStatus) DeepCopyInto(out *AWSMachineTemplateStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateStatus. +func (in *AWSMachineTemplateStatus) DeepCopy() *AWSMachineTemplateStatus { + if in == nil { + return nil + } + out := new(AWSMachineTemplateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster. +func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster { + if in == nil { + return nil + } + out := new(AWSManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AWSManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList. +func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList { + if in == nil { + return nil + } + out := new(AWSManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) { + *out = *in + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec. +func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec { + if in == nil { + return nil + } + out := new(AWSManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { + *out = *in + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(v1beta1.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus. +func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus { + if in == nil { + return nil + } + out := new(AWSManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. +func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { + if in == nil { + return nil + } + out := new(AWSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSRoleSpec) DeepCopyInto(out *AWSRoleSpec) { + *out = *in + if in.PolicyARNs != nil { + in, out := &in.PolicyARNs, &out.PolicyARNs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSRoleSpec. 
+func (in *AWSRoleSpec) DeepCopy() *AWSRoleSpec { + if in == nil { + return nil + } + out := new(AWSRoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalListenerSpec) DeepCopyInto(out *AdditionalListenerSpec) { + *out = *in + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TargetGroupHealthCheckAdditionalSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalListenerSpec. +func (in *AdditionalListenerSpec) DeepCopy() *AdditionalListenerSpec { + if in == nil { + return nil + } + out := new(AdditionalListenerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedNamespaces) DeepCopyInto(out *AllowedNamespaces) { + *out = *in + if in.NamespaceList != nil { + in, out := &in.NamespaceList, &out.NamespaceList + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Selector.DeepCopyInto(&out.Selector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedNamespaces. +func (in *AllowedNamespaces) DeepCopy() *AllowedNamespaces { + if in == nil { + return nil + } + out := new(AllowedNamespaces) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bastion) DeepCopyInto(out *Bastion) { + *out = *in + if in.AllowedCIDRBlocks != nil { + in, out := &in.AllowedCIDRBlocks, &out.AllowedCIDRBlocks + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bastion. 
+func (in *Bastion) DeepCopy() *Bastion { + if in == nil { + return nil + } + out := new(Bastion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildParams) DeepCopyInto(out *BuildParams) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } + if in.Additional != nil { + in, out := &in.Additional, &out.Additional + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildParams. +func (in *BuildParams) DeepCopy() *BuildParams { + if in == nil { + return nil + } + out := new(BuildParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNIIngressRule) DeepCopyInto(out *CNIIngressRule) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIIngressRule. +func (in *CNIIngressRule) DeepCopy() *CNIIngressRule { + if in == nil { + return nil + } + out := new(CNIIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in CNIIngressRules) DeepCopyInto(out *CNIIngressRules) { + { + in := &in + *out = make(CNIIngressRules, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNIIngressRules. 
+func (in CNIIngressRules) DeepCopy() CNIIngressRules { + if in == nil { + return nil + } + out := new(CNIIngressRules) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CNISpec) DeepCopyInto(out *CNISpec) { + *out = *in + if in.CNIIngressRules != nil { + in, out := &in.CNIIngressRules, &out.CNIIngressRules + *out = make(CNIIngressRules, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNISpec. +func (in *CNISpec) DeepCopy() *CNISpec { + if in == nil { + return nil + } + out := new(CNISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassicELBAttributes) DeepCopyInto(out *ClassicELBAttributes) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBAttributes. +func (in *ClassicELBAttributes) DeepCopy() *ClassicELBAttributes { + if in == nil { + return nil + } + out := new(ClassicELBAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClassicELBHealthCheck) DeepCopyInto(out *ClassicELBHealthCheck) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBHealthCheck. +func (in *ClassicELBHealthCheck) DeepCopy() *ClassicELBHealthCheck { + if in == nil { + return nil + } + out := new(ClassicELBHealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClassicELBListener) DeepCopyInto(out *ClassicELBListener) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassicELBListener. +func (in *ClassicELBListener) DeepCopy() *ClassicELBListener { + if in == nil { + return nil + } + out := new(ClassicELBListener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInit) DeepCopyInto(out *CloudInit) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInit. +func (in *CloudInit) DeepCopy() *CloudInit { + if in == nil { + return nil + } + out := new(CloudInit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticIPPool) DeepCopyInto(out *ElasticIPPool) { + *out = *in + if in.PublicIpv4Pool != nil { + in, out := &in.PublicIpv4Pool, &out.PublicIpv4Pool + *out = new(string) + **out = **in + } + if in.PublicIpv4PoolFallBackOrder != nil { + in, out := &in.PublicIpv4PoolFallBackOrder, &out.PublicIpv4PoolFallBackOrder + *out = new(PublicIpv4PoolFallbackOrder) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticIPPool. +func (in *ElasticIPPool) DeepCopy() *ElasticIPPool { + if in == nil { + return nil + } + out := new(ElasticIPPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. 
+func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMPool) DeepCopyInto(out *IPAMPool) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPool. +func (in *IPAMPool) DeepCopy() *IPAMPool { + if in == nil { + return nil + } + out := new(IPAMPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv6) DeepCopyInto(out *IPv6) { + *out = *in + if in.EgressOnlyInternetGatewayID != nil { + in, out := &in.EgressOnlyInternetGatewayID, &out.EgressOnlyInternetGatewayID + *out = new(string) + **out = **in + } + if in.IPAMPool != nil { + in, out := &in.IPAMPool, &out.IPAMPool + *out = new(IPAMPool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6. +func (in *IPv6) DeepCopy() *IPv6 { + if in == nil { + return nil + } + out := new(IPv6) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ignition) DeepCopyInto(out *Ignition) { + *out = *in + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(IgnitionProxy) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(IgnitionTLS) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ignition. +func (in *Ignition) DeepCopy() *Ignition { + if in == nil { + return nil + } + out := new(Ignition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IgnitionProxy) DeepCopyInto(out *IgnitionProxy) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]IgnitionNoProxy, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionProxy. +func (in *IgnitionProxy) DeepCopy() *IgnitionProxy { + if in == nil { + return nil + } + out := new(IgnitionProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IgnitionTLS) DeepCopyInto(out *IgnitionTLS) { + *out = *in + if in.CASources != nil { + in, out := &in.CASources, &out.CASources + *out = make([]IgnitionCASource, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnitionTLS. +func (in *IgnitionTLS) DeepCopy() *IgnitionTLS { + if in == nil { + return nil + } + out := new(IgnitionTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + if in.CidrBlocks != nil { + in, out := &in.CidrBlocks, &out.CidrBlocks + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPv6CidrBlocks != nil { + in, out := &in.IPv6CidrBlocks, &out.IPv6CidrBlocks + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceSecurityGroupIDs != nil { + in, out := &in.SourceSecurityGroupIDs, &out.SourceSecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceSecurityGroupRoles != nil { + in, out := &in.SourceSecurityGroupRoles, &out.SourceSecurityGroupRoles + *out = make([]SecurityGroupRole, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in IngressRules) DeepCopyInto(out *IngressRules) { + { + in := &in + *out = make(IngressRules, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules. +func (in IngressRules) DeepCopy() IngressRules { + if in == nil { + return nil + } + out := new(IngressRules) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Instance) DeepCopyInto(out *Instance) { + *out = *in + if in.SSHKeyName != nil { + in, out := &in.SSHKeyName, &out.SSHKeyName + *out = new(string) + **out = **in + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(string) + **out = **in + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1beta1.MachineAddress, len(*in)) + copy(*out, *in) + } + if in.PrivateIP != nil { + in, out := &in.PrivateIP, &out.PrivateIP + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(string) + **out = **in + } + if in.ENASupport != nil { + in, out := &in.ENASupport, &out.ENASupport + *out = new(bool) + **out = **in + } + if in.EBSOptimized != nil { + in, out := &in.EBSOptimized, &out.EBSOptimized + *out = new(bool) + **out = **in + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(Volume) + (*in).DeepCopyInto(*out) + } + if in.NonRootVolumes != nil { + in, out := &in.NonRootVolumes, &out.NonRootVolumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + if in.VolumeIDs != nil { + in, out := &in.VolumeIDs, &out.VolumeIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.InstanceMetadataOptions != nil { + in, out := 
&in.InstanceMetadataOptions, &out.InstanceMetadataOptions + *out = new(InstanceMetadataOptions) + **out = **in + } + if in.PrivateDNSName != nil { + in, out := &in.PrivateDNSName, &out.PrivateDNSName + *out = new(PrivateDNSName) + (*in).DeepCopyInto(*out) + } + if in.PublicIPOnLaunch != nil { + in, out := &in.PublicIPOnLaunch, &out.PublicIPOnLaunch + *out = new(bool) + **out = **in + } + if in.CapacityReservationID != nil { + in, out := &in.CapacityReservationID, &out.CapacityReservationID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. +func (in *Instance) DeepCopy() *Instance { + if in == nil { + return nil + } + out := new(Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceMetadataOptions) DeepCopyInto(out *InstanceMetadataOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceMetadataOptions. +func (in *InstanceMetadataOptions) DeepCopy() *InstanceMetadataOptions { + if in == nil { + return nil + } + out := new(InstanceMetadataOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Listener) DeepCopyInto(out *Listener) { + *out = *in + in.TargetGroup.DeepCopyInto(&out.TargetGroup) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener. +func (in *Listener) DeepCopy() *Listener { + if in == nil { + return nil + } + out := new(Listener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClassicELBListeners != nil { + in, out := &in.ClassicELBListeners, &out.ClassicELBListeners + *out = make([]ClassicELBListener, len(*in)) + copy(*out, *in) + } + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(ClassicELBHealthCheck) + **out = **in + } + out.ClassicElbAttributes = in.ClassicElbAttributes + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ELBListeners != nil { + in, out := &in.ELBListeners, &out.ELBListeners + *out = make([]Listener, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ELBAttributes != nil { + in, out := &in.ELBAttributes, &out.ELBAttributes + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. +func (in *LoadBalancer) DeepCopy() *LoadBalancer { + if in == nil { + return nil + } + out := new(LoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.VPC.DeepCopyInto(&out.VPC) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make(Subnets, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CNI != nil { + in, out := &in.CNI, &out.CNI + *out = new(CNISpec) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupOverrides != nil { + in, out := &in.SecurityGroupOverrides, &out.SecurityGroupOverrides + *out = make(map[SecurityGroupRole]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AdditionalControlPlaneIngressRules != nil { + in, out := &in.AdditionalControlPlaneIngressRules, &out.AdditionalControlPlaneIngressRules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make(map[SecurityGroupRole]SecurityGroup, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + in.APIServerELB.DeepCopyInto(&out.APIServerELB) + in.SecondaryAPIServerELB.DeepCopyInto(&out.SecondaryAPIServerELB) + if in.NatGatewaysIPs != nil { + in, out := &in.NatGatewaysIPs, &out.NatGatewaysIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. 
+func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateDNSName) DeepCopyInto(out *PrivateDNSName) { + *out = *in + if in.EnableResourceNameDNSAAAARecord != nil { + in, out := &in.EnableResourceNameDNSAAAARecord, &out.EnableResourceNameDNSAAAARecord + *out = new(bool) + **out = **in + } + if in.EnableResourceNameDNSARecord != nil { + in, out := &in.EnableResourceNameDNSARecord, &out.EnableResourceNameDNSARecord + *out = new(bool) + **out = **in + } + if in.HostnameType != nil { + in, out := &in.HostnameType, &out.HostnameType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateDNSName. +func (in *PrivateDNSName) DeepCopy() *PrivateDNSName { + if in == nil { + return nil + } + out := new(PrivateDNSName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteTable) DeepCopyInto(out *RouteTable) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTable. +func (in *RouteTable) DeepCopy() *RouteTable { + if in == nil { + return nil + } + out := new(RouteTable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Bucket) DeepCopyInto(out *S3Bucket) { + *out = *in + if in.NodesIAMInstanceProfiles != nil { + in, out := &in.NodesIAMInstanceProfiles, &out.NodesIAMInstanceProfiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PresignedURLDuration != nil { + in, out := &in.PresignedURLDuration, &out.PresignedURLDuration + *out = new(v1.Duration) + **out = **in + } + if in.BestEffortDeleteObjects != nil { + in, out := &in.BestEffortDeleteObjects, &out.BestEffortDeleteObjects + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Bucket. +func (in *S3Bucket) DeepCopy() *S3Bucket { + if in == nil { + return nil + } + out := new(S3Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { + *out = *in + if in.IngressRules != nil { + in, out := &in.IngressRules, &out.IngressRules + *out = make(IngressRules, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. +func (in *SecurityGroup) DeepCopy() *SecurityGroup { + if in == nil { + return nil + } + out := new(SecurityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) { + *out = *in + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions. 
+func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions { + if in == nil { + return nil + } + out := new(SpotMarketOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { + *out = *in + if in.RouteTableID != nil { + in, out := &in.RouteTableID, &out.RouteTableID + *out = new(string) + **out = **in + } + if in.NatGatewayID != nil { + in, out := &in.NatGatewayID, &out.NatGatewayID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ZoneType != nil { + in, out := &in.ZoneType, &out.ZoneType + *out = new(ZoneType) + **out = **in + } + if in.ParentZoneName != nil { + in, out := &in.ParentZoneName, &out.ParentZoneName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec. +func (in *SubnetSpec) DeepCopy() *SubnetSpec { + if in == nil { + return nil + } + out := new(SubnetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Subnets) DeepCopyInto(out *Subnets) { + { + in := &in + *out = make(Subnets, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnets. +func (in Subnets) DeepCopy() Subnets { + if in == nil { + return nil + } + out := new(Subnets) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in Tags) DeepCopyInto(out *Tags) { + { + in := &in + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags. +func (in Tags) DeepCopy() Tags { + if in == nil { + return nil + } + out := new(Tags) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupHealthCheck) DeepCopyInto(out *TargetGroupHealthCheck) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.ThresholdCount != nil { + in, out := &in.ThresholdCount, &out.ThresholdCount + *out = new(int64) + **out = **in + } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheck. +func (in *TargetGroupHealthCheck) DeepCopy() *TargetGroupHealthCheck { + if in == nil { + return nil + } + out := new(TargetGroupHealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupHealthCheckAPISpec) DeepCopyInto(out *TargetGroupHealthCheckAPISpec) { + *out = *in + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.ThresholdCount != nil { + in, out := &in.ThresholdCount, &out.ThresholdCount + *out = new(int64) + **out = **in + } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAPISpec. +func (in *TargetGroupHealthCheckAPISpec) DeepCopy() *TargetGroupHealthCheckAPISpec { + if in == nil { + return nil + } + out := new(TargetGroupHealthCheckAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopyInto(out *TargetGroupHealthCheckAdditionalSpec) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(string) + **out = **in + } + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.ThresholdCount != nil { + in, out := &in.ThresholdCount, &out.ThresholdCount + *out = new(int64) + **out = **in + } + if in.UnhealthyThresholdCount != nil { + in, out := &in.UnhealthyThresholdCount, &out.UnhealthyThresholdCount + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupHealthCheckAdditionalSpec. +func (in *TargetGroupHealthCheckAdditionalSpec) DeepCopy() *TargetGroupHealthCheckAdditionalSpec { + if in == nil { + return nil + } + out := new(TargetGroupHealthCheckAdditionalSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupSpec) DeepCopyInto(out *TargetGroupSpec) { + *out = *in + if in.HealthCheck != nil { + in, out := &in.HealthCheck, &out.HealthCheck + *out = new(TargetGroupHealthCheck) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupSpec. 
+func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec { + if in == nil { + return nil + } + out := new(TargetGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { + *out = *in + if in.IPAMPool != nil { + in, out := &in.IPAMPool, &out.IPAMPool + *out = new(IPAMPool) + **out = **in + } + if in.IPv6 != nil { + in, out := &in.IPv6, &out.IPv6 + *out = new(IPv6) + (*in).DeepCopyInto(*out) + } + if in.InternetGatewayID != nil { + in, out := &in.InternetGatewayID, &out.InternetGatewayID + *out = new(string) + **out = **in + } + if in.CarrierGatewayID != nil { + in, out := &in.CarrierGatewayID, &out.CarrierGatewayID + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AvailabilityZoneUsageLimit != nil { + in, out := &in.AvailabilityZoneUsageLimit, &out.AvailabilityZoneUsageLimit + *out = new(int) + **out = **in + } + if in.AvailabilityZoneSelection != nil { + in, out := &in.AvailabilityZoneSelection, &out.AvailabilityZoneSelection + *out = new(AZSelectionScheme) + **out = **in + } + if in.PrivateDNSHostnameTypeOnLaunch != nil { + in, out := &in.PrivateDNSHostnameTypeOnLaunch, &out.PrivateDNSHostnameTypeOnLaunch + *out = new(string) + **out = **in + } + if in.ElasticIPPool != nil { + in, out := &in.ElasticIPPool, &out.ElasticIPPool + *out = new(ElasticIPPool) + (*in).DeepCopyInto(*out) + } + if in.SubnetSchema != nil { + in, out := &in.SubnetSchema, &out.SubnetSchema + *out = new(SubnetSchemaType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCSpec. 
+func (in *VPCSpec) DeepCopy() *VPCSpec { + if in == nil { + return nil + } + out := new(VPCSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(int64) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.defaults.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.defaults.go new file mode 100644 index 000000000..506e7e780 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2/zz_generated.defaults.go @@ -0,0 +1,57 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. 
+// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AWSCluster{}, func(obj interface{}) { SetObjectDefaults_AWSCluster(obj.(*AWSCluster)) }) + scheme.AddTypeDefaultingFunc(&AWSClusterTemplate{}, func(obj interface{}) { SetObjectDefaults_AWSClusterTemplate(obj.(*AWSClusterTemplate)) }) + scheme.AddTypeDefaultingFunc(&AWSMachine{}, func(obj interface{}) { SetObjectDefaults_AWSMachine(obj.(*AWSMachine)) }) + scheme.AddTypeDefaultingFunc(&AWSMachineTemplate{}, func(obj interface{}) { SetObjectDefaults_AWSMachineTemplate(obj.(*AWSMachineTemplate)) }) + return nil +} + +func SetObjectDefaults_AWSCluster(in *AWSCluster) { + SetDefaults_AWSClusterSpec(&in.Spec) + SetDefaults_NetworkSpec(&in.Spec.NetworkSpec) + SetDefaults_Bastion(&in.Spec.Bastion) +} + +func SetObjectDefaults_AWSClusterTemplate(in *AWSClusterTemplate) { + SetDefaults_AWSClusterSpec(&in.Spec.Template.Spec) + SetDefaults_NetworkSpec(&in.Spec.Template.Spec.NetworkSpec) + SetDefaults_Bastion(&in.Spec.Template.Spec.Bastion) +} + +func SetObjectDefaults_AWSMachine(in *AWSMachine) { + SetDefaults_AWSMachineSpec(&in.Spec) +} + +func SetObjectDefaults_AWSMachineTemplate(in *AWSMachineTemplate) { + SetDefaults_AWSMachineSpec(&in.Spec.Template.Spec) +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/feature.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/feature.go new file mode 100644 index 000000000..061e4edd5 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/feature.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package feature provides a feature-gate implementation for capa. +package feature + +import ( + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/component-base/featuregate" +) + +const ( + // Every capa-specific feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.X + // MyFeature featuregate.Feature = "MyFeature". + + // EKS is used to enable EKS support + // owner: @richardcase + // alpha: v0.4 + EKS featuregate.Feature = "EKS" + + // EKSEnableIAM will enable the IAM resource creation/modification + // owner: @richardcase + // alpha: v0.4 + EKSEnableIAM featuregate.Feature = "EKSEnableIAM" + + // EKSAllowAddRoles is used to enable the usage of additional IAM roles + // owner: @richardcase + // alpha: v0.4 + EKSAllowAddRoles featuregate.Feature = "EKSAllowAddRoles" + + // EKSFargate is used to enable the usage of EKS fargate profiles + // owner: @richardcase + // alpha: v0.4 + EKSFargate featuregate.Feature = "EKSFargate" + + // MachinePool is used to enable ASG support + // owner: @mytunguyen + // alpha: v0.1 + MachinePool featuregate.Feature = "MachinePool" + + // EventBridgeInstanceState will use Event Bridge and notifications to keep instance state up-to-date + // owner: @gab-satchi + // alpha: v0.7? + EventBridgeInstanceState featuregate.Feature = "EventBridgeInstanceState" + + // AutoControllerIdentityCreator will create AWSClusterControllerIdentity instance that allows all namespaces to use it. 
+ // owner: @sedefsavas + // alpha: v0.6 + AutoControllerIdentityCreator featuregate.Feature = "AutoControllerIdentityCreator" + + // BootstrapFormatIgnition will allow an user to enable alternate machine bootstrap format, viz. Ignition. + BootstrapFormatIgnition featuregate.Feature = "BootstrapFormatIgnition" + + // ExternalResourceGC is used to enable the garbage collection of external resources like NLB/ALB on deletion + // owner: @richardcase + // alpha: v1.5 + ExternalResourceGC featuregate.Feature = "ExternalResourceGC" + + // AlternativeGCStrategy is used to enable garbage collection of external resources to be performed without resource group tagging API. It is usually needed in airgap env when tagging API is not available. + // owner: @wyike + // alpha: v2.0 + AlternativeGCStrategy featuregate.Feature = "AlternativeGCStrategy" + + // TagUnmanagedNetworkResources is used to disable tagging unmanaged networking resources. + // owner: @skarlso + // alpha: v2.0 + TagUnmanagedNetworkResources featuregate.Feature = "TagUnmanagedNetworkResources" + + // ROSA is used to enable ROSA support + // owner: @enxebre + // alpha: v2.2 + ROSA featuregate.Feature = "ROSA" +) + +func init() { + runtime.Must(MutableGates.Add(defaultCAPAFeatureGates)) +} + +// defaultCAPAFeatureGates consists of all known capa-specific feature keys. +// To add a new feature, define a key for it above and add it here. 
+var defaultCAPAFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + // Every feature should be initiated here: + EKS: {Default: true, PreRelease: featuregate.Beta}, + EKSEnableIAM: {Default: false, PreRelease: featuregate.Beta}, + EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Beta}, + EKSFargate: {Default: false, PreRelease: featuregate.Alpha}, + EventBridgeInstanceState: {Default: false, PreRelease: featuregate.Alpha}, + MachinePool: {Default: true, PreRelease: featuregate.Beta}, + AutoControllerIdentityCreator: {Default: true, PreRelease: featuregate.Alpha}, + BootstrapFormatIgnition: {Default: false, PreRelease: featuregate.Alpha}, + ExternalResourceGC: {Default: false, PreRelease: featuregate.Alpha}, + AlternativeGCStrategy: {Default: false, PreRelease: featuregate.Alpha}, + TagUnmanagedNetworkResources: {Default: true, PreRelease: featuregate.Alpha}, + ROSA: {Default: false, PreRelease: featuregate.Alpha}, +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/gates.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/gates.go new file mode 100644 index 000000000..b3576c313 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api-provider-aws/v2/feature/gates.go @@ -0,0 +1,35 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package feature + +import ( + "k8s.io/component-base/featuregate" + + "sigs.k8s.io/cluster-api/feature" +) + +var ( + // MutableGates is a mutable version of DefaultFeatureGate. + // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. + // Tests that need to modify featuregate gates for the duration of their test should use: + // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() + MutableGates featuregate.MutableFeatureGate = feature.MutableGates + + // Gates is a shared global FeatureGate. + // Top-level commands/options setup that needs to modify this featuregate gate should use DefaultMutableFeatureGate. + Gates featuregate.FeatureGate = MutableGates +)