diff --git a/README.md b/README.md
index 06338a8..6c673fc 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,11 @@ The `kube-vip-cloud-provider` will only implement the `loadBalancer` functionali
 - IP ranges [start address - end address]
 - Multiple pools by CIDR per namespace
 - Multiple IP ranges per namespace (handles overlapping ranges)
+- Support for mixed IP families when specifying multiple pools or ranges
 - Setting of static addresses through `--load-balancer-ip=x.x.x.x` or through annotations `kube-vip.io/loadbalancerIPs: x.x.x.x`
 - Setting the special IP `0.0.0.0` for DHCP workflow.
 - Support single stack IPv6 or IPv4
+- Support for dualstack via the annotation: `kube-vip.io/loadbalancerIPs: 192.168.10.10,2001:db8::1`
 - Support ascending and descending search order by setting search-order=desc
 
 ## Installing the `kube-vip-cloud-provider`
@@ -87,7 +89,54 @@ kubectl create configmap --namespace kube-system kubevip --from-literal range-gl
 
 ## Multiple pools or ranges
 
-We can apply multiple pools or ranges by seperating them with commas.. i.e. `192.168.0.200/30,192.168.0.200/29` or `2001::12/127,2001::10/127` or `192.168.0.10-192.168.0.11,192.168.0.10-192.168.0.13` or `2001::10-2001::14,2001::20-2001::24`
+We can apply multiple pools or ranges by separating them with commas, e.g. `192.168.0.200/30,192.168.0.200/29` or `2001::12/127,2001::10/127` or `192.168.0.10-192.168.0.11,192.168.0.10-192.168.0.13` or `2001::10-2001::14,2001::20-2001::24` or `192.168.0.200/30,2001::10/127`
+
+## Dualstack Services
+
+Suppose a pool in the configmap is as follows: `range-default: 192.168.0.10-192.168.0.11,2001::10-2001::11`
+and there are no IPs currently in use.
+
+Then, by creating a service with the following spec (with `IPv6` specified first in `ipFamilies`):
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+  labels:
+    app.kubernetes.io/name: MyApp
+spec:
+  type: LoadBalancer
+  ipFamilyPolicy: PreferDualStack
+  ipFamilies:
+  - IPv6
+  - IPv4
+  selector:
+    app.kubernetes.io/name: MyApp
+  ports:
+  - protocol: TCP
+    port: 80
+```
+
+The service will receive the annotation `kube-vip.io/loadbalancerIPs:
+2001::10,192.168.0.10` following the intent to prefer IPv6. Conversely, if
+`IPv4` were specified first, the IPv4 address would appear first in the
+annotation.
+
+With the `PreferDualStack` IP family policy, kube-vip-cloud-provider will make a
+best effort to provide at least one IP in `loadBalancerIPs`, as long as either
+IP family in the pool has available addresses.
+
+If `RequireDualStack` is specified, then kube-vip-cloud-provider will fail to
+set the `kube-vip.io/loadbalancerIPs` annotation if it cannot find an available
+address in both IP families for the pool.
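+
+As a quick reference (a sketch, following the configmap conventions shown
+earlier), the `range-default` pool assumed above could be created with:
+
+```
+kubectl create configmap --namespace kube-system kubevip --from-literal range-default='192.168.0.10-192.168.0.11,2001::10-2001::11'
+```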
+
 ## Special DHCP CIDR
diff --git a/pkg/ipam/addressbuilder.go b/pkg/ipam/addressbuilder.go
index fcd4c67..544cd0a 100644
--- a/pkg/ipam/addressbuilder.go
+++ b/pkg/ipam/addressbuilder.go
@@ -8,8 +8,8 @@ import (
 	"go4.org/netipx"
 )
 
-// buildHostsFromCidr - Builds a IPSet constructed from the cidr
-func buildHostsFromCidr(cidr string) (*netipx.IPSet, error) {
+// parseCidrs - Builds an IPSet constructed from the cidrs
+func parseCidrs(cidr string) (*netipx.IPSet, error) {
 	// Split the ipranges (comma separated)
 	cidrs := strings.Split(cidr, ",")
 	if len(cidrs) == 0 {
@@ -23,6 +23,21 @@ func buildHostsFromCidr(cidr string) (*netipx.IPSet, error) {
 		if err != nil {
 			return nil, err
 		}
+		builder.AddPrefix(prefix)
+	}
+	return builder.IPSet()
+}
+
+// buildHostsFromCidr - Builds an IPSet constructed from the cidr and filters out
+// the broadcast IP and network IP for IPv4 networks
+func buildHostsFromCidr(cidr string) (*netipx.IPSet, error) {
+	unfilteredSet, err := parseCidrs(cidr)
+	if err != nil {
+		return nil, err
+	}
+
+	builder := &netipx.IPSetBuilder{}
+	for _, prefix := range unfilteredSet.Prefixes() {
 		if prefix.IsSingleIP() {
 			builder.Add(prefix.Addr())
 			continue
@@ -31,7 +46,6 @@ func buildHostsFromCidr(cidr string) (*netipx.IPSet, error) {
 			builder.AddPrefix(prefix)
 			continue
 		}
-
 		if r := netipx.RangeOfPrefix(prefix); r.IsValid() {
 			if prefix.Bits() == 31 {
 				// rfc3021 Using 31-Bit Prefixes on IPv4 Point-to-Point Links
@@ -77,3 +91,49 @@ func buildAddressesFromRange(ipRangeString string) (*netipx.IPSet, error) {
 
 	return builder.IPSet()
 }
+
+// SplitCIDRsByIPFamily splits the cidrs into separate lists of ipv4
+// and ipv6 CIDRs
+func SplitCIDRsByIPFamily(cidrs string) (ipv4 string, ipv6 string, err error) {
+	ipPools, err := parseCidrs(cidrs)
+	if err != nil {
+		return "", "", err
+	}
+	ipv4Cidrs := strings.Builder{}
+	ipv6Cidrs := strings.Builder{}
+	for _, prefix := range ipPools.Prefixes() {
+		cidrsToEdit := &ipv4Cidrs
+		if prefix.Addr().Is6() {
+			cidrsToEdit = &ipv6Cidrs
+		}
+		if cidrsToEdit.Len() > 0 {
+			cidrsToEdit.WriteByte(',')
+		}
+		_, _ = cidrsToEdit.WriteString(prefix.String())
+	}
+	return ipv4Cidrs.String(), ipv6Cidrs.String(), nil
+}
+
+// SplitRangesByIPFamily splits the ipRangeString into separate lists of ipv4
+// and ipv6 ranges
+func SplitRangesByIPFamily(ipRangeString string) (ipv4 string, ipv6 string, err error) {
+	ipPools, err := buildAddressesFromRange(ipRangeString)
+	if err != nil {
+		return "", "", err
+	}
+	ipv4Ranges := strings.Builder{}
+	ipv6Ranges := strings.Builder{}
+	for _, ipRange := range ipPools.Ranges() {
+		rangeToEdit := &ipv4Ranges
+		if ipRange.From().Is6() {
+			rangeToEdit = &ipv6Ranges
+		}
+		if rangeToEdit.Len() > 0 {
+			rangeToEdit.WriteByte(',')
+		}
+		_, _ = rangeToEdit.WriteString(ipRange.From().String())
+		_ = rangeToEdit.WriteByte('-')
+		_, _ = rangeToEdit.WriteString(ipRange.To().String())
+	}
+	return ipv4Ranges.String(), ipv6Ranges.String(), nil
+}
diff --git a/pkg/ipam/ipam.go b/pkg/ipam/ipam.go
index d1f6b95..ac8dd27 100644
--- a/pkg/ipam/ipam.go
+++ b/pkg/ipam/ipam.go
@@ -9,6 +9,20 @@ import (
 	"k8s.io/klog"
 )
 
+type OutOfIPsError struct {
+	namespace string
+	pool      string
+	isCidr    bool
+}
+
+func (e *OutOfIPsError) Error() string {
+	what := "range"
+	if e.isCidr {
+		what = "cidr"
+	}
+	return fmt.Sprintf("no addresses available in [%s] %s [%s]", e.namespace, what, e.pool)
+}
+
 // Manager - handles the addresses for each namespace/vip
 var Manager []ipManager
 
@@ -46,7 +60,7 @@ func FindAvailableHostFromRange(namespace, ipRange string, inUseIPSet *netipx.IP
 		addr, err := FindFreeAddress(Manager[x].poolIPSet, inUseIPSet, descOrder)
 		if err != nil {
-			return "", fmt.Errorf("no addresses available in [%s] range [%s]", namespace, ipRange)
+			return "", &OutOfIPsError{namespace: namespace, pool: ipRange, isCidr: false}
 		}
 		return addr.String(), nil
 	}
@@ -67,7 +81,7 @@ func FindAvailableHostFromRange(namespace, ipRange string, inUseIPSet *netipx.IP
 	addr, err := FindFreeAddress(poolIPSet, inUseIPSet, descOrder)
 	if err != nil {
-		return "", fmt.Errorf("no addresses available in [%s] range [%s]", namespace, ipRange)
+		return "", &OutOfIPsError{namespace: namespace, pool: ipRange, isCidr: false}
 	}
 	return addr.String(), nil
 }
@@ -91,7 +105,7 @@ func FindAvailableHostFromCidr(namespace, cidr string, inUseIPSet *netipx.IPSet,
 		}
 		addr, err := FindFreeAddress(Manager[x].poolIPSet, inUseIPSet, descOrder)
 		if err != nil {
-			return "", fmt.Errorf("no addresses available in [%s] cidr [%s]", namespace, cidr)
+			return "", &OutOfIPsError{namespace: namespace, pool: cidr, isCidr: true}
 		}
 		return addr.String(), nil
 
@@ -111,7 +125,7 @@ func FindAvailableHostFromCidr(namespace, cidr string, inUseIPSet *netipx.IPSet,
 
 	addr, err := FindFreeAddress(poolIPSet, inUseIPSet, descOrder)
 	if err != nil {
-		return "", fmt.Errorf("no addresses available in [%s] cidr [%s]", namespace, cidr)
+		return "", &OutOfIPsError{namespace: namespace, pool: cidr, isCidr: true}
 	}
 	return addr.String(), nil
diff --git a/pkg/ipam/ipam_test.go b/pkg/ipam/ipam_test.go
index b613460..f83ca17 100644
--- a/pkg/ipam/ipam_test.go
+++ b/pkg/ipam/ipam_test.go
@@ -201,6 +201,198 @@ func Test_buildHostsFromCidr(t *testing.T) {
 	}
 }
 
+func TestSplitCIDRsByIPFamily(t *testing.T) {
+	type args struct {
+		cidrs string
+	}
+	type output struct {
+		ipv4Cidrs string
+		ipv6Cidrs string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    output
+		wantErr bool
+	}{
+		{
+			name: "single ipv4 cidr",
+			args: args{
+				"192.168.0.200/30",
+			},
+			want: output{
+				ipv4Cidrs: "192.168.0.200/30",
+				ipv6Cidrs: "",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv4 cidrs",
+			args: args{
+				"192.168.0.200/30,192.168.1.200/30",
+			},
+			want: output{
+				ipv4Cidrs: "192.168.0.200/30,192.168.1.200/30",
+				ipv6Cidrs: "",
+			},
+			wantErr: false,
+		},
+		{
+			name: "single ipv6 cidr",
+			args: args{
+				"fe80::10/127",
+			},
+			want: output{
+				ipv4Cidrs: "",
+				ipv6Cidrs: "fe80::10/127",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv6 cidrs",
+			args: args{
+				"fe80::10/127,fe80::fe/127",
+			},
+			want: output{
+				ipv4Cidrs: "",
+				ipv6Cidrs: "fe80::10/127,fe80::fe/127",
+			},
+			wantErr: false,
+		},
+		{
+			name: "one ipv4 cidr and one ipv6 cidr",
+			args: args{
+				"192.168.0.200/30,fe80::10/127",
+			},
+			want: output{
+				ipv4Cidrs: "192.168.0.200/30",
+				ipv6Cidrs: "fe80::10/127",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv4 cidrs and multiple ipv6 cidrs",
+			args: args{
+				"192.168.0.200/30,192.168.1.200/30,fe80::10/127,fe80::fe/127",
+			},
+			want: output{
+				ipv4Cidrs: "192.168.0.200/30,192.168.1.200/30",
+				ipv6Cidrs: "fe80::10/127,fe80::fe/127",
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ipv4Cidrs, ipv6Cidrs, err := SplitCIDRsByIPFamily(tt.args.cidrs)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("SplitCIDRsByIPFamily() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if ipv4Cidrs != tt.want.ipv4Cidrs || ipv6Cidrs != tt.want.ipv6Cidrs {
+				t.Errorf("SplitCIDRsByIPFamily() = {ipv4Cidrs: %v, ipv6Cidrs: %v}, want %+v", ipv4Cidrs, ipv6Cidrs, tt.want)
+			}
+		})
+	}
+}
+
+func TestSplitRangesByIPFamily(t *testing.T) {
+	type args struct {
+		ipRangeString string
+	}
+	type output struct {
+		ipv4Ranges string
+		ipv6Ranges string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    output
+		wantErr bool
+	}{
+		{
+			name: "single ipv4 range",
+			args: args{
+				"192.168.0.10-192.168.0.12",
+			},
+			want: output{
+				ipv4Ranges: "192.168.0.10-192.168.0.12",
+				ipv6Ranges: "",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv4 ranges",
+			args: args{
+				"192.168.0.10-192.168.0.12,192.168.0.100-192.168.0.120",
+			},
+			want: output{
+				ipv4Ranges: "192.168.0.10-192.168.0.12,192.168.0.100-192.168.0.120",
+				ipv6Ranges: "",
+			},
+			wantErr: false,
+		},
+		{
+			name: "single ipv6 range",
+			args: args{
+				"fe80::13-fe80::14",
+			},
+			want: output{
+				ipv4Ranges: "",
+				ipv6Ranges: "fe80::13-fe80::14",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv6 ranges",
+			args: args{
+				"fe80::13-fe80::14,fe80::130-fe80::140",
+			},
+			want: output{
+				ipv4Ranges: "",
+				ipv6Ranges: "fe80::13-fe80::14,fe80::130-fe80::140",
+			},
+			wantErr: false,
+		},
+		{
+			name: "one ipv4 range and one ipv6 range",
+			args: args{
+				"192.168.0.10-192.168.0.12,fe80::13-fe80::14",
+			},
+			want: output{
+				ipv4Ranges: "192.168.0.10-192.168.0.12",
+				ipv6Ranges: "fe80::13-fe80::14",
+			},
+			wantErr: false,
+		},
+		{
+			name: "multiple ipv4 ranges and multiple ipv6 ranges",
+			args: args{
+				"192.168.0.10-192.168.0.12,192.168.0.100-192.168.0.120,fe80::13-fe80::14,fe80::130-fe80::140",
+			},
+			want: output{
+				ipv4Ranges: "192.168.0.10-192.168.0.12,192.168.0.100-192.168.0.120",
+				ipv6Ranges: "fe80::13-fe80::14,fe80::130-fe80::140",
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ipv4Ranges, ipv6Ranges, err := SplitRangesByIPFamily(tt.args.ipRangeString)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("SplitRangesByIPFamily() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			if ipv4Ranges != tt.want.ipv4Ranges || ipv6Ranges != tt.want.ipv6Ranges {
+				t.Errorf("SplitRangesByIPFamily() = {ipv4Ranges: %v, ipv6Ranges: %v}, want %+v", ipv4Ranges, ipv6Ranges, tt.want)
+			}
+		})
+	}
+}
+
 func TestFindAvailableHostFromRange(t *testing.T) {
 	type args struct {
 		namespace string
diff --git a/pkg/provider/loadBalancer.go b/pkg/provider/loadBalancer.go
index b2dbf84..b0c2e77 100644
--- a/pkg/provider/loadBalancer.go
+++ b/pkg/provider/loadBalancer.go
@@ -20,7 +20,7 @@ import (
 const (
 	// this annotation is for specifying IPs for a loadbalancer
 	// use plural for dual stack support in the future
-	// Example: kube-vip.io/loadbalancerIPs: 10.1.2.3
+	// Example: kube-vip.io/loadbalancerIPs: 10.1.2.3,fd00::100
 	loadbalancerIPsAnnotations = "kube-vip.io/loadbalancerIPs"
 	implementationLabelKey     = "implementation"
 	implementationLabelValue   = "kube-vip"
@@ -192,8 +192,7 @@ func (k *kubevipLoadBalancerManager) syncLoadBalancer(ctx context.Context, servi
 	descOrder := getSearchOrder(controllerCM)
 
 	// If the LoadBalancer address is empty, then do a local IPAM lookup
-	loadBalancerIP, err := discoverAddress(service.Namespace, pool, inUseSet, descOrder)
-
+	loadBalancerIPs, err := discoverVIPs(service.Namespace, pool, inUseSet, descOrder, service.Spec.IPFamilyPolicy, service.Spec.IPFamilies)
 	if err != nil {
 		return nil, err
 	}
@@ -205,7 +204,7 @@ func (k *kubevipLoadBalancerManager) syncLoadBalancer(ctx context.Context, servi
 		return getErr
 	}
 
-	klog.Infof("Updating service [%s], with load balancer IPAM address [%s]", service.Name, loadBalancerIP)
+	klog.Infof("Updating service [%s] with load balancer IPAM address(es) [%s]", service.Name, loadBalancerIPs)
 
 	if recentService.Labels == nil {
 		// Just because ..
@@ -218,11 +217,11 @@ func (k *kubevipLoadBalancerManager) syncLoadBalancer(ctx context.Context, servi
 		recentService.Annotations = make(map[string]string)
 	}
 	// use annotation instead of label to support ipv6
-	recentService.Annotations[loadbalancerIPsAnnotations] = loadBalancerIP
+	recentService.Annotations[loadbalancerIPsAnnotations] = loadBalancerIPs
 
 	// this line will be removed once kube-vip can recognize annotations
 	// Set IPAM address to Load Balancer Service
-	recentService.Spec.LoadBalancerIP = loadBalancerIP
+	recentService.Spec.LoadBalancerIP = strings.Split(loadBalancerIPs, ",")[0]
 
 	// Update the actual service with the address and the labels
 	_, updateErr := k.kubeClient.CoreV1().Services(recentService.Namespace).Update(ctx, recentService, metav1.UpdateOptions{})
@@ -276,6 +275,105 @@ func discoverPool(cm *v1.ConfigMap, namespace, configMapName string) (pool strin
 	return "", false, fmt.Errorf("no address pools could be found")
 }
 
+func discoverVIPs(
+	namespace, pool string, inUseIPSet *netipx.IPSet, descOrder bool,
+	ipFamilyPolicy *v1.IPFamilyPolicy, ipFamilies []v1.IPFamily,
+) (vips string, err error) {
+	var ipv4Pool, ipv6Pool string
+
+	// Check if DHCP is required
+	if pool == "0.0.0.0/32" {
+		return "0.0.0.0", nil
+		// Check if ip pool contains a cidr, if not assume it is a range
+	} else if len(pool) == 0 {
+		return "", fmt.Errorf("could not discover address: pool is not specified")
+	} else if strings.Contains(pool, "/") {
+		ipv4Pool, ipv6Pool, err = ipam.SplitCIDRsByIPFamily(pool)
+	} else {
+		ipv4Pool, ipv6Pool, err = ipam.SplitRangesByIPFamily(pool)
+	}
+	if err != nil {
+		return "", err
+	}
+
+	vipBuilder := strings.Builder{}
+
+	// Handle single stack case
+	if ipFamilyPolicy == nil || *ipFamilyPolicy == v1.IPFamilyPolicySingleStack {
+		ipPool := ipv4Pool
+		if len(ipFamilies) == 0 {
+			if len(ipv4Pool) == 0 {
+				ipPool = ipv6Pool
+			}
+		} else if ipFamilies[0] == v1.IPv6Protocol {
+			ipPool = ipv6Pool
+		}
+		if len(ipPool) == 0 {
+			return "", fmt.Errorf("could not find suitable pool for the IP family of the service")
+		}
+		return discoverAddress(namespace, ipPool, inUseIPSet, descOrder)
+	}
+
+	// Handle dual stack case
+	if *ipFamilyPolicy == v1.IPFamilyPolicyRequireDualStack {
+		// With RequireDualStack, we want to make sure both pools with both IP
+		// families exist
+		if len(ipv4Pool) == 0 || len(ipv6Pool) == 0 {
+			return "", fmt.Errorf("service requires dual-stack, but the configuration does not have both IPv4 and IPv6 pools listed for the namespace")
+		}
+	}
+
+	primaryPool := ipv4Pool
+	secondaryPool := ipv6Pool
+	if len(ipFamilies) > 0 && ipFamilies[0] == v1.IPv6Protocol {
+		primaryPool = ipv6Pool
+		secondaryPool = ipv4Pool
+	}
+	// Provide VIPs from both IP families if possible (guaranteed if RequireDualStack)
+	var primaryPoolErr, secondaryPoolErr error
+	if len(primaryPool) > 0 {
+		primaryVip, err := discoverAddress(namespace, primaryPool, inUseIPSet, descOrder)
+		if err == nil {
+			_, _ = vipBuilder.WriteString(primaryVip)
+		} else if _, outOfIPs := err.(*ipam.OutOfIPsError); outOfIPs {
+			primaryPoolErr = err
+		} else {
+			return "", err
+		}
+	}
+	if len(secondaryPool) > 0 {
+		secondaryVip, err := discoverAddress(namespace, secondaryPool, inUseIPSet, descOrder)
+		if err == nil {
+			if vipBuilder.Len() > 0 {
+				vipBuilder.WriteByte(',')
+			}
+			_, _ = vipBuilder.WriteString(secondaryVip)
+		} else if _, outOfIPs := err.(*ipam.OutOfIPsError); outOfIPs {
+			secondaryPoolErr = err
+		} else {
+			return "", err
+		}
+	}
+	if *ipFamilyPolicy == v1.IPFamilyPolicyPreferDualStack {
+		if primaryPoolErr != nil && secondaryPoolErr != nil {
+			return "", fmt.Errorf("could not allocate any IP address for PreferDualStack service: %s", renderErrors(primaryPoolErr, secondaryPoolErr))
+		}
+		singleError := primaryPoolErr
+		if secondaryPoolErr != nil {
+			singleError = secondaryPoolErr
+		}
+		if singleError != nil {
+			klog.Warningf("PreferDualStack service will be single-stack because of error: %s", singleError)
+		}
+	} else if *ipFamilyPolicy == v1.IPFamilyPolicyRequireDualStack {
+		if primaryPoolErr != nil || secondaryPoolErr != nil {
+			return "", fmt.Errorf("could not allocate required IP addresses for RequireDualStack service: %s", renderErrors(primaryPoolErr, secondaryPoolErr))
+		}
+	}
+
+	return vipBuilder.String(), nil
+}
+
 func discoverAddress(namespace, pool string, inUseIPSet *netipx.IPSet, descOrder bool) (vip string, err error) {
 	// Check if DHCP is required
 	if pool == "0.0.0.0/32" {
@@ -308,3 +406,13 @@ func getSearchOrder(cm *v1.ConfigMap) (descOrder bool) {
 	}
 	return false
 }
+
+func renderErrors(errs ...error) string {
+	s := strings.Builder{}
+	for _, err := range errs {
+		if err != nil {
+			s.WriteString(fmt.Sprintf("\n\t- %s", err))
+		}
+	}
+	return s.String()
+}
diff --git a/pkg/provider/loadBalancer_test.go b/pkg/provider/loadBalancer_test.go
index 18d908f..0b76194 100644
--- a/pkg/provider/loadBalancer_test.go
+++ b/pkg/provider/loadBalancer_test.go
@@ -314,6 +314,342 @@ func Test_DiscoveryAddressRange(t *testing.T) {
 	}
 }
 
+func ipFamilyPolicyPtr(p v1.IPFamilyPolicy) *v1.IPFamilyPolicy {
+	return &p
+}
+
+func Test_discoverVIPs(t *testing.T) {
+	type args struct {
+		ipFamilyPolicy     *v1.IPFamilyPolicy
+		ipFamilies         []v1.IPFamily
+		pool               string
+		existingServiceIPS []string
+	}
+
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name: "IPv4 pool",
+			args: args{
+				ipFamilyPolicy:     nil,
+				ipFamilies:         nil,
+				pool:               "10.10.10.8-10.10.10.15",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9", "10.10.10.10", "10.10.10.12"},
+			},
+			want:    "10.10.10.11",
+			wantErr: false,
+		},
+		{
+			name: "IPv4 pool with IPv4 service",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicySingleStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol},
+				pool:               "10.10.10.8-10.10.10.15",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9", "10.10.10.10", "10.10.10.12"},
+			},
+			want:    "10.10.10.11",
+			wantErr: false,
+		},
+		{
+			name: "IPv6 pool",
+			args: args{
+				ipFamilyPolicy:     nil,
+				ipFamilies:         nil,
+				pool:               "fd00::1-fd00::10",
+				existingServiceIPS: []string{"fd00::1", "fd00::2", "fd00::4"},
+			},
+			want:    "fd00::3",
+			wantErr: false,
+		},
+		{
+			name: "IPv6 pool with IPv6 service",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicySingleStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv6Protocol},
+				pool:               "fd00::1-fd00::10",
+				existingServiceIPS: []string{"fd00::1", "fd00::2", "fd00::4"},
+			},
+			want:    "fd00::3",
+			wantErr: false,
+		},
+		{
+			name: "IPv6 pool with IPv4 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicySingleStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol},
+				pool:           "fd00::1-fd00::10",
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "IPv4 pool with IPv6 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicySingleStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv6Protocol},
+				pool:           "10.10.10.8-10.10.10.15",
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "IPv4 pool with PreferDualStack service",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.15",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9", "10.10.10.10", "10.10.10.12"},
+			},
+			want:    "10.10.10.11",
+			wantErr: false,
+		},
+		{
+			name: "IPv6 pool with PreferDualStack service",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "fd00::1-fd00::10",
+				existingServiceIPS: []string{"fd00::1", "fd00::2", "fd00::4"},
+			},
+			want:    "fd00::3",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack service with no IP families explicitly specified",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				pool:           "10.10.10.8-10.10.10.15,fd00::1-fd00::10",
+			},
+			want:    "10.10.10.8,fd00::1",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv4,IPv6 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:           "10.10.10.8-10.10.10.15,fd00::1-fd00::10",
+			},
+			want:    "10.10.10.8,fd00::1",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv6,IPv4 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:           "10.10.10.8-10.10.10.15,fd00::1-fd00::10",
+			},
+			want:    "fd00::1,10.10.10.8",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv4,IPv6 service, but the IPv6 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"fd00::1", "fd00::2"},
+			},
+			want:    "10.10.10.8",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv4,IPv6 service, but the IPv4 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9"},
+			},
+			want:    "fd00::1",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv6,IPv4 service, but the IPv6 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"fd00::1", "fd00::2"},
+			},
+			want:    "10.10.10.8",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv6,IPv4 service, but the IPv4 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9"},
+			},
+			want:    "fd00::1",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv4,IPv6 service, but no pools have available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9", "fd00::1", "fd00::2"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with PreferDualStack IPv4,IPv6 service, but there is an invalid pool",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyPreferDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2,invalid-pool",
+				existingServiceIPS: []string{},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "IPv4 pool with RequireDualStack service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:           "10.10.10.8-10.10.10.15",
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "IPv6 pool with RequireDualStack service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:           "fd00::1-fd00::10",
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "empty pool with RequireDualStack service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:           "",
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv4,IPv6 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:           "10.10.10.8-10.10.10.15,fd00::1-fd00::10",
+			},
+			want:    "10.10.10.8,fd00::1",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv6,IPv4 service",
+			args: args{
+				ipFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:     []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:           "10.10.10.8-10.10.10.15,fd00::1-fd00::10",
+			},
+			want:    "fd00::1,10.10.10.8",
+			wantErr: false,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv4,IPv6 service, but the IPv6 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"fd00::1", "fd00::2"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv4,IPv6 service, but the IPv4 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv6,IPv4 service, but the IPv6 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"fd00::1", "fd00::2"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv6,IPv4 service, but the IPv4 pool has no available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "dualstack pool with RequireDualStack IPv4,IPv6 service, but no pools have available addresses",
+			args: args{
+				ipFamilyPolicy:     ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+				ipFamilies:         []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+				pool:               "10.10.10.8-10.10.10.9,fd00::1-fd00::2",
+				existingServiceIPS: []string{"10.10.10.8", "10.10.10.9", "fd00::1", "fd00::2"},
+			},
+			want:    "",
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			builder := &netipx.IPSetBuilder{}
+			for i := range tt.args.existingServiceIPS {
+				addr, err := netip.ParseAddr(tt.args.existingServiceIPS[i])
+				if err != nil {
+					t.Errorf("discoverVIPs() error = %v", err)
+					return
+				}
+				builder.Add(addr)
+			}
+			s, err := builder.IPSet()
+			if err != nil {
+				t.Errorf("discoverVIPs() error = %v", err)
+				return
+			}
+
+			gotString, err := discoverVIPs("discover-vips-test-ns", tt.args.pool, s, false, tt.args.ipFamilyPolicy, tt.args.ipFamilies)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("discoverVIPs() error: %v, expected: %v", err, tt.wantErr)
+				return
+			}
+			if !assert.EqualValues(t, tt.want, gotString) {
+				t.Errorf("discoverVIPs() returned: %s, expected: %s", gotString, tt.want)
+			}
+		})
+	}
+
+}
+
 func Test_syncLoadBalancer(t *testing.T) {
 	tests := []struct {
@@ -483,6 +819,46 @@ func Test_syncLoadBalancer(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "dualstack loadbalancer",
+			originalService: v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test",
+					Name:      "name",
+				},
+				Spec: v1.ServiceSpec{
+					IPFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+					IPFamilies:     []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+				},
+			},
+
+			poolConfigMap: &v1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      KubeVipClientConfig,
+					Namespace: KubeVipClientConfigNamespace,
+				},
+				Data: map[string]string{
+					"cidr-global": "10.120.120.1/24,fe80::10/126",
+				},
+			},
+			expectedService: v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test",
+					Name:      "name",
+					Labels: map[string]string{
+						"implementation": "kube-vip",
+					},
+					Annotations: map[string]string{
+						"kube-vip.io/loadbalancerIPs": "fe80::10,10.120.120.1",
+					},
+				},
+				Spec: v1.ServiceSpec{
+					IPFamilyPolicy: ipFamilyPolicyPtr(v1.IPFamilyPolicyRequireDualStack),
+					IPFamilies:     []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+					LoadBalancerIP: "fe80::10",
+				},
+			},
+		},
 	}
 
 	for _, tt := range tests {
@@ -529,7 +905,7 @@ func Test_syncLoadBalancer(t *testing.T) {
 			t.Error(err)
 		}
 
-		assert.EqualValues(t, *resService, tt.expectedService)
+		assert.EqualValues(t, tt.expectedService, *resService)
 	})
 }
 }
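
For manual verification (a usage sketch; `my-service` is the hypothetical service name from the README example above), the allocated address(es) can be read back from the annotation once the service has been reconciled:

```
kubectl get service my-service -o jsonpath='{.metadata.annotations.kube-vip\.io/loadbalancerIPs}'
```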