From fdb4c39dccdd130d5e993a287ea58e72338cd4d2 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Tue, 8 Aug 2023 06:06:20 -0400 Subject: [PATCH] E2E for Cilium CNI without Kubeproxy - add manifest scripts - change makefile --- Makefile | 47 +- .../data/cni/cilium/cilium-no-kubeproxy.yaml | 1056 +++++++++++++++++ .../cluster-template/kustomization.yaml | 5 + .../v1alpha4/no-kubeproxy/no-kubeproxy.yaml | 23 + .../kustomization.yaml | 5 + .../cluster-template-ccm/kustomization.yaml | 5 + .../cluster-template-csi/kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../kustomization.yaml | 5 + .../cluster-template/kustomization.yaml | 5 + .../v1beta1/no-kubeproxy/no-kubeproxy.yaml | 23 + 17 files changed, 1212 insertions(+), 2 deletions(-) create mode 100644 test/e2e/data/cni/cilium/cilium-no-kubeproxy.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/cluster-template/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/no-kubeproxy.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-additional-categories/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-csi/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-remediation/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-md-remediation/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nmt/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nutanix-cluster/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-secret/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-project/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-upgrades/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml diff --git a/Makefile b/Makefile index 09e9f41058..687e2c20da 100644 --- a/Makefile +++ b/Makefile @@ -54,6 +54,7 @@ export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH) CNI_PATH_CALICO ?= "${E2E_DIR}/data/cni/calico/calico.yaml" CNI_PATH_FLANNEL ?= "${E2E_DIR}/data/cni/flannel/flannel.yaml" # From https://github.com/flannel-io/flannel/blob/master/Documentation/kube-flannel.yml CNI_PATH_CILIUM ?= "${E2E_DIR}/data/cni/cilium/cilium.yaml" # helm template cilium cilium/cilium --version 1.13.0 -n kube-system --set hubble.enabled=false | sed 's/${BIN_PATH}/$BIN_PATH/g' +CNI_PATH_CILIUM_NO_KUBEPROXY ?= "${E2E_DIR}/data/cni/cilium/cilium-no-kubeproxy.yaml" # helm template cilium cilium/cilium --version 1.13.0 -n kube-system --set hubble.enabled=false --set 
kubeProxyReplacement=strict | sed 's/${BIN_PATH}/$BIN_PATH/g' # # Binaries. @@ -302,6 +303,24 @@ cluster-e2e-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1 $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi.yaml +cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ## Generate cluster templates without kubeproxy + # v1alpha4 + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/no-kubeproxy/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml + + # v1beta1 + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-secret --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-secret.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-nutanix-cluster --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nutanix-cluster.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-csi --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-csi.yaml + cluster-templates: $(KUSTOMIZE) ## Generate cluster templates for all flavors $(KUSTOMIZE) build $(TEMPLATES_DIR)/base > $(TEMPLATES_DIR)/cluster-template.yaml $(KUSTOMIZE) build $(TEMPLATES_DIR)/csi > $(TEMPLATES_DIR)/cluster-template-csi.yaml @@ -395,6 +414,27 @@ test-e2e: 
docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) +.PHONY: test-e2e-no-kubeproxy +test-e2e-no-kubeproxy: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates-no-kubeproxy cluster-templates ## Run the end-to-end tests without kubeproxy + mkdir -p $(ARTIFACTS) + NUTANIX_LOG_LEVEL=debug $(GINKGO) -v \ + --trace \ + --progress \ + --tags=e2e \ + --label-filter=$(LABEL_FILTER_ARGS) \ + $(_SKIP_ARGS) \ + --nodes=$(GINKGO_NODES) \ + --no-color=$(GINKGO_NOCOLOR) \ + --output-dir="$(ARTIFACTS)" \ + --junit-report=${JUNIT_REPORT_FILE} \ + --timeout="24h" \ + --always-emit-ginkgo-writer \ + $(GINKGO_ARGS) ./test/e2e -- \ + -e2e.artifacts-folder="$(ARTIFACTS)" \ + -e2e.config="$(E2E_CONF_FILE)" \ + -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) + .PHONY: list-e2e list-e2e: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates ## Run the end-to-end tests mkdir -p $(ARTIFACTS) @@ -409,7 +449,6 @@ list-e2e: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates test-e2e-calico: CNI=$(CNI_PATH_CALICO) $(MAKE) test-e2e - .PHONY: test-e2e-flannel test-e2e-flannel: CNI=$(CNI_PATH_FLANNEL) $(MAKE) test-e2e @@ -418,8 +457,12 @@ test-e2e-flannel: test-e2e-cilium: CNI=$(CNI_PATH_CILIUM) $(MAKE) test-e2e +.PHONY: test-e2e-cilium-no-kubeproxy +test-e2e-cilium-no-kubeproxy: + CNI=$(CNI_PATH_CILIUM_NO_KUBEPROXY) $(MAKE) test-e2e-no-kubeproxy + .PHONY: test-e2e-all-cni -test-e2e-all-cni: test-e2e test-e2e-calico test-e2e-flannel test-e2e-cilium +test-e2e-all-cni: test-e2e test-e2e-calico test-e2e-flannel test-e2e-cilium test-e2e-cilium-no-kubeproxy ## -------------------------------------- diff --git a/test/e2e/data/cni/cilium/cilium-no-kubeproxy.yaml b/test/e2e/data/cni/cilium/cilium-no-kubeproxy.yaml new file mode 100644 index 0000000000..598b4c6122 --- /dev/null +++ b/test/e2e/data/cni/cilium/cilium-no-kubeproxy.yaml @@ -0,0 +1,1056 @@ +--- +# Source: cilium/templates/cilium-agent/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium" + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-operator" + namespace: kube-system +--- +# Source: cilium/templates/cilium-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in an etcd kvstore, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". 
+ identity-allocation-mode: crd + identity-heartbeat-timeout: "30m0s" + identity-gc-interval: "15m0s" + cilium-endpoint-gc-interval: "5m0s" + nodes-gc-interval: "5m0s" + skip-cnp-status-startup-clean: "false" + # Disable the usage of CiliumEndpoint CRD + disable-endpoint-crd: "false" + + # If you want to run cilium in debug mode change this value to true + debug: "false" + debug-verbose: "" + # The agent can be put into the following three policy enforcement modes + # default, always and never. + # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes + enable-policy: "default" + + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: "false" + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + enable-bpf-clock-probe: "true" + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the fewer packets + # that will be seen in monitor output. + monitor-aggregation: medium + + # The monitor aggregation interval governs the typical time between monitor + # notification events for each allowed connection. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-interval: "5s" + + # The monitor aggregation flags determine which TCP flags, upon the + # first observation, cause monitor notifications to be generated. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-flags: all + # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic + # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + bpf-map-dynamic-size-ratio: "0.0025" + # bpf-policy-map-max specifies the maximum number of entries in endpoint + # policy map (per endpoint) + bpf-policy-map-max: "16384" + # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, + # backend and affinity maps. + bpf-lb-map-max: "65536" + bpf-lb-external-clusterip: "false" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # As a result, reply packets may be dropped and the load-balancing decisions + # for established connections may change. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "false" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: default + # Unique ID of the cluster. 
Must be unique across all connected clusters and + # in the range of 1 to 255. Only relevant when building a mesh of clusters. + cluster-id: "0" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: "vxlan" + + + # Enables L7 proxy for L7 policy enforcement and visibility + enable-l7-proxy: "true" + + enable-ipv4-masquerade: "true" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "true" + + enable-xt-socket-fallback: "true" + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + + auto-direct-node-routes: "false" + enable-local-redirect-policy: "false" + + kube-proxy-replacement: "strict" + bpf-lb-sock: "false" + enable-health-check-nodeport: "true" + node-port-bind-protection: "true" + enable-auto-protect-node-port-range: "true" + enable-svc-source-range-check: "true" + enable-l2-neigh-discovery: "true" + arping-refresh-period: "30s" + enable-endpoint-health-checking: "true" + enable-health-checking: "true" + enable-well-known-identities: "false" + enable-remote-node-identity: "true" + synchronize-k8s-nodes: "true" + operator-api-serve-addr: "127.0.0.1:9234" + ipam: "cluster-pool" + cluster-pool-ipv4-cidr: "10.0.0.0/8" + cluster-pool-ipv4-mask-size: "24" + disable-cnp-status-updates: "true" + enable-vtep: "false" + vtep-endpoint: "" + vtep-cidr: "" + vtep-mask: "" + vtep-mac: "" + enable-bgp-control-plane: "false" + procfs: "/host/proc" + bpf-root: "/sys/fs/bpf" + cgroup-root: "/run/cilium/cgroupv2" + enable-k8s-terminating-endpoint: "true" + enable-sctp: "false" + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + unmanaged-pod-watcher-interval: "15" + tofqdns-dns-reject-response-code: "refused" + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "50" + tofqdns-idle-connection-grace-period: "0s" + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-response-max-delay: "100ms" + agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" +--- +# Source: cilium/templates/cilium-agent/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + # To synchronize garbage collection of such resources + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + verbs: + - patch +--- +# Source: cilium/templates/cilium-operator/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + # to automatically delete [core|kube]dns pods so that they start being + # managed by Cilium + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + # To remove node taints + - nodes + # To set NetworkUnavailable false on startup + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups' + - create + - update + - deletecollection + # To update the status of the CNPs and CCNPs + - patch + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + # Update the auto-generated CNPs and CCNPs status. 
- patch + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + # To perform garbage collection of such resources + - delete + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + # To synchronize garbage collection of such resources + - update +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + # To perform CiliumNode garbage collection + - delete +- apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - update + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". 
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +--- +# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: "cilium" + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system +--- +# Source: cilium/templates/cilium-agent/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Source: cilium/templates/cilium-agent/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cilium-config-agent + namespace: kube-system + labels: + app.kubernetes.io/part-of: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: + - kind: ServiceAccount + name: "cilium" + namespace: kube-system +--- +# Source: cilium/templates/cilium-agent/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cilium + namespace: kube-system + labels: + k8s-app: cilium + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-agent +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: + # Set the AppArmor profile to "unconfined". The value of this annotation + # can be modified as long as users know which profiles they have available + # in AppArmor. 
+ container.apparmor.security.beta.kubernetes.io/cilium-agent: "unconfined" + container.apparmor.security.beta.kubernetes.io/clean-cilium-state: "unconfined" + container.apparmor.security.beta.kubernetes.io/mount-cgroup: "unconfined" + container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: "unconfined" + labels: + k8s-app: cilium + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + spec: + containers: + - name: cilium-agent + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + command: + - cilium-agent + args: + - --config-dir=/tmp/cilium/config-map + startupProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9879 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 105 + periodSeconds: 2 + successThreshold: 1 + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9879 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9879 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + name: cilium-config + key: cni-chaining-mode + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + name: cilium-config + key: custom-cni-conf + optional: true + lifecycle: + postStart: + exec: + command: + - "/cni-install.sh" + - "--enable-debug=false" + - "--cni-exclusive=true" + - "--log-file=/var/run/cilium/cilium-cni.log" + preStop: + exec: + command: + - /cni-uninstall.sh + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + # Unprivileged containers need to mount /proc/sys/net from the host + # to have write access + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + # Unprivileged containers need to mount /proc/sys/kernel from the host + # to have write access + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - name: bpf-maps + mountPath: /sys/fs/bpf + # Unprivileged containers can't set mount propagation to bidirectional; + # in this case we will mount the bpf fs from an init container that + # is privileged and set the mount propagation from host to container + # in Cilium. 
+ mountPropagation: HostToContainer + - name: cilium-run + mountPath: /var/run/cilium + - name: cni-path + mountPath: /host/opt/cni/bin + - name: etc-cni-netd + mountPath: /host/etc/cni/net.d + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + # Needed to be able to load kernel modules + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + - name: tmp + mountPath: /tmp + initContainers: + - name: config + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + command: + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: tmp + mountPath: /tmp + terminationMessagePolicy: FallbackToLogsOnError + # Required to mount cgroup2 filesystem on the underlying Kubernetes node. + # We use nsenter command with host's cgroup and mount namespaces enabled. + - name: mount-cgroup + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "$BIN_PATH/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + - name: apply-sysctl-overwrites + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + env: + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -ec + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "$BIN_PATH/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + volumeMounts: + - name: hostproc + mountPath: /hostproc + - name: cni-path + mountPath: /hostbin + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + # Mount the bpf fs if it is not mounted. We will perform this task + # from a privileged container because bidirectional mount propagation + # only works from privileged containers. 
+ - name: mount-bpf-fs + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + args: + - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' + command: + - /bin/bash + - -c + - -- + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + privileged: true + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + - name: clean-cilium-state + image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68" + imagePullPolicy: IfNotPresent + command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-state + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + name: cilium-config + key: clean-cilium-bpf-state + optional: true + terminationMessagePolicy: FallbackToLogsOnError + securityContext: + seLinuxOptions: + level: s0 + type: spc_t + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + volumeMounts: + - name: bpf-maps + mountPath: /sys/fs/bpf + # Required to mount cgroup filesystem from the host to cilium agent pod + - name: cilium-cgroup + mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + - name: cilium-run + mountPath: /var/run/cilium + resources: + requests: + cpu: 100m + memory: 100Mi # wait-for-kube-proxy + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: "cilium" + serviceAccountName: "cilium" + terminationGracePeriodSeconds: 1 + hostNetwork: true + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists + volumes: + # For sharing configuration between the "config" initContainer and the agent + - name: tmp + emptyDir: {} + # To keep state between restarts / upgrades + - name: cilium-run + hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + # To keep state between restarts / upgrades for bpf maps + - name: bpf-maps + hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + # To mount cgroup2 filesystem on the host + - name: hostproc + hostPath: + path: /proc + type: Directory + # To keep state between restarts / upgrades for cgroup2 filesystem + - name: cilium-cgroup + hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + # To install cilium cni plugin in the host + - name: cni-path + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + # To install cilium cni configuration in the host + - name: etc-cni-netd + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + # To be able to load kernel modules + - name: lib-modules + hostPath: + path: /lib/modules + # To access iptables concurrently with other processes (e.g. 
kube-proxy) + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + secretName: cilium-clustermesh + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + optional: true + - name: host-proc-sys-net + hostPath: + path: /proc/sys/net + type: Directory + - name: host-proc-sys-kernel + hostPath: + path: /proc/sys/kernel + type: Directory +--- +# Source: cilium/templates/cilium-operator/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cilium-operator + namespace: kube-system + labels: + io.cilium/app: operator + name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator +spec: + # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go + # for more details. + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + io.cilium/app: operator + name: cilium-operator + app.kubernetes.io/part-of: cilium + app.kubernetes.io/name: cilium-operator + spec: + containers: + - name: cilium-operator + image: "quay.io/cilium/operator-generic:v1.13.0@sha256:4b58d5b33e53378355f6e8ceb525ccf938b7b6f5384b35373f1f46787467ebf5" + imagePullPolicy: IfNotPresent + command: + - cilium-operator-generic + args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + livenessProbe: + httpGet: + host: "127.0.0.1" + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - name: cilium-config-path + mountPath: /tmp/cilium/config-map + readOnly: true + terminationMessagePolicy: FallbackToLogsOnError + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: "cilium-operator" + serviceAccountName: "cilium-operator" + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux + tolerations: + - operator: Exists + volumes: + # To read the configuration from the config map + - name: cilium-config-path + configMap: + name: cilium-config +--- +# Source: cilium/templates/cilium-secrets-namespace.yaml +# Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. 
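Note on verifying the kube-proxy-free data path: with the addon/kube-proxy phase skipped at kubeadm init (see the template patches below) and the manifest above configuring kube-proxy replacement, service load-balancing is handled entirely by the Cilium agent. A minimal smoke check against a provisioned workload cluster, assuming kubectl targets that cluster (the exact status wording varies by Cilium version), might be:

kubectl -n kube-system get daemonset kube-proxy        # expected to fail with NotFound
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status | grep KubeProxyReplacement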
diff --git a/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/cluster-template/kustomization.yaml new file mode 100644 index 0000000000..924f81da23 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/cluster-template/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml \ No newline at end of file diff --git a/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/no-kubeproxy.yaml b/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/no-kubeproxy.yaml new file mode 100644 index 0000000000..ba5b499816 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1alpha4/no-kubeproxy/no-kubeproxy.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-kcp" + namespace: "${NAMESPACE}" +spec: + kubeadmConfigSpec: + initConfiguration: + skipPhases: + - addon/kube-proxy +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + initConfiguration: + skipPhases: + - addon/kube-proxy diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-additional-categories/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-additional-categories/kustomization.yaml new file mode 100644 index 0000000000..ec257a72f6 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-additional-categories/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-additional-categories/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml new file mode 100644 index 0000000000..2f3f12d1c1 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-ccm/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-csi/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-csi/kustomization.yaml new file mode 100644 index 0000000000..40a17254a2 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-csi/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-csi/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-remediation/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-remediation/kustomization.yaml new file mode 100644 index 0000000000..9bc9f28b01 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-remediation/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-kcp-remediation/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in/kustomization.yaml 
b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in/kustomization.yaml new file mode 100644 index 0000000000..ff1312b3e1 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-kcp-scale-in/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-kcp-scale-in/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-md-remediation/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-md-remediation/kustomization.yaml new file mode 100644 index 0000000000..b68d75be2d --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-md-remediation/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-md-remediation/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nmt/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nmt/kustomization.yaml new file mode 100644 index 0000000000..6c48a7086e --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nmt/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-no-nmt/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nutanix-cluster/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nutanix-cluster/kustomization.yaml new file mode 100644 index 0000000000..f33fa491d0 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-nutanix-cluster/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-no-nutanix-cluster/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-secret/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-secret/kustomization.yaml new file mode 100644 index 0000000000..c90364aff6 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-no-secret/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-no-secret/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-project/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-project/kustomization.yaml new file mode 100644 index 0000000000..f828d36702 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-project/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-project/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-upgrades/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-upgrades/kustomization.yaml new file mode 100644 index 0000000000..3fe17222f4 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-upgrades/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template-upgrades/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml diff --git 
a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template/kustomization.yaml new file mode 100644 index 0000000000..924f81da23 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template/kustomization.yaml @@ -0,0 +1,5 @@ +bases: + - ../../cluster-template/ + +patchesStrategicMerge: + - ../no-kubeproxy.yaml \ No newline at end of file diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml new file mode 100644 index 0000000000..9e2e035443 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/no-kubeproxy.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-kcp" + namespace: "${NAMESPACE}" +spec: + kubeadmConfigSpec: + initConfiguration: + skipPhases: + - addon/kube-proxy +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + initConfiguration: + skipPhases: + - addon/kube-proxy
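Both no-kubeproxy.yaml patches above do the same thing for their respective API versions: they add addon/kube-proxy to kubeadm's skipPhases so the kube-proxy DaemonSet is never installed, leaving Cilium as the sole service proxy. A typical local invocation of the new targets, assuming the usual e2e environment (E2E_CONF_FILE, Nutanix endpoint and credentials) is already configured:

make cluster-e2e-templates-no-kubeproxy   # render the kube-proxy-free templates over the regular flavor outputs
make test-e2e-cilium-no-kubeproxy         # run the e2e suite with CNI=cilium-no-kubeproxy.yaml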