Skip to content

Commit 5a92530

Browse files
authored
[improvement] : Update dualstack flavors (#816)
* update dualstack flavor for kubeadm and k3s based clusters
* specify range for service ips as well
* add ipv6 servicecidr for k3s flavor as well
1 parent 00a50e0 commit 5a92530

File tree

6 files changed

+237
-157
lines changed

6 files changed

+237
-157
lines changed

docs/src/SUMMARY.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
- [Etcd Backup](./topics/flavors/etcd-backup-restore.md)
2323
- [Etcd-disk](./topics/flavors/etcd-disk.md)
2424
- [Flatcar](./topics/flavors/flatcar.md)
25-
- [NodeIPAM CCM (kubeadm)](./topics/flavors/nodeipam-ccm.md)
2625
- [k3s](./topics/flavors/k3s.md)
2726
- [konnectivity (kubeadm)](./topics/flavors/konnectivity.md)
2827
- [rke2](./topics/flavors/rke2.md)

docs/src/topics/flavors/dual-stack.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
11
# Dual-Stack
2+
3+
This flavor enables allocating both IPv4 and IPv6 ranges to nodes within k8s cluster. This flavor disables nodeipam controller within kube-controller-manager and uses CCM specific nodeipam controller to allocate CIDRs to Nodes. IPv6 ranges are allocated to VPC, Subnets and Nodes attached to those subnets. Pods get both ipv4 and ipv6 addresses.
4+
25
## Specification
36
| Supported Control Plane | CNI | Default OS | Installs ClusterClass | IPv4 | IPv6 |
47
|-------------------------|--------|--------------|-----------------------|------|------|

docs/src/topics/flavors/nodeipam-ccm.md

Lines changed: 0 additions & 26 deletions
This file was deleted.
Lines changed: 110 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,26 @@
11
apiVersion: kustomize.config.k8s.io/v1beta1
22
kind: Kustomization
33
resources:
4-
- ../vpcless
4+
- ../default
55

66
patches:
77
- target:
88
group: infrastructure.cluster.x-k8s.io
99
version: v1alpha2
10-
kind: LinodeCluster
10+
kind: LinodeVPC
1111
patch: |-
1212
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
13-
kind: LinodeCluster
13+
kind: LinodeVPC
1414
metadata:
1515
name: ${CLUSTER_NAME}
1616
spec:
17-
nodeBalancerFirewallRef: null
17+
ipv6Range:
18+
- range: auto
19+
subnets:
20+
- ipv4: ${VPC_NETWORK_CIDR:=10.0.0.0/8}
21+
label: default
22+
ipv6Range:
23+
- range: auto
1824
- target:
1925
group: cluster.x-k8s.io
2026
version: v1beta1
@@ -29,11 +35,40 @@ patches:
2935
pods:
3036
cidrBlocks:
3137
- 10.192.0.0/10
32-
- fd02::/80
3338
services:
3439
cidrBlocks:
3540
- 10.96.0.0/12
3641
- fd03::/108
42+
- target:
43+
group: infrastructure.cluster.x-k8s.io
44+
version: v1alpha2
45+
kind: LinodeMachineTemplate
46+
patch: |-
47+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
48+
kind: LinodeMachineTemplate
49+
metadata:
50+
name: ${CLUSTER_NAME}-control-plane
51+
spec:
52+
template:
53+
spec:
54+
ipv6Options:
55+
enableSLAAC: true
56+
isPublicIPv6: true
57+
- target:
58+
group: infrastructure.cluster.x-k8s.io
59+
version: v1alpha2
60+
kind: LinodeMachineTemplate
61+
patch: |-
62+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
63+
kind: LinodeMachineTemplate
64+
metadata:
65+
name: ${CLUSTER_NAME}-md-0
66+
spec:
67+
template:
68+
spec:
69+
ipv6Options:
70+
enableSLAAC: true
71+
isPublicIPv6: true
3772
- target:
3873
group: controlplane.cluster.x-k8s.io
3974
version: v1beta2
@@ -47,9 +82,7 @@ patches:
4782
kthreesConfigSpec:
4883
serverConfig:
4984
kubeControllerManagerArgs:
50-
- "node-cidr-mask-size-ipv6=96"
51-
clusterCidr: "10.192.0.0/10,fd02::/80"
52-
serviceCidr: "10.96.0.0/12,fd03::/108"
85+
- "allocate-node-cidrs=false"
5386
- target:
5487
group: controlplane.cluster.x-k8s.io
5588
version: v1beta2
@@ -65,18 +98,29 @@ patches:
6598
namespace: kube-system
6699
spec:
67100
targetNamespace: kube-system
68-
version: ${CILIUM_VERSION:=1.15.4}
101+
version: ${CILIUM_VERSION:=1.16.10}
69102
chart: cilium
70103
repo: https://helm.cilium.io/
71104
bootstrap: true
72105
valuesContent: |-
73106
bgpControlPlane:
74107
enabled: true
108+
routingMode: native
109+
kubeProxyReplacement: true
110+
ipv4NativeRoutingCIDR: ${VPC_NETWORK_CIDR:=10.0.0.0/8}
111+
ipv6NativeRoutingCIDR: ::/0
112+
tunnelProtocol: ""
113+
enableIPv4Masquerade: true
114+
enableIPv6Masquerade: false
75115
policyAuditMode: ${FW_AUDIT_ONLY:=true}
76116
hostFirewall:
77117
enabled: true
78118
extraConfig:
79119
allow-localhost: policy
120+
k8sServiceHost: 10.0.0.2
121+
k8sServicePort: 6443
122+
extraArgs:
123+
- --nodeport-addresses=0.0.0.0/0
80124
ipam:
81125
mode: kubernetes
82126
ipv4:
@@ -85,36 +129,68 @@ patches:
85129
enabled: true
86130
k8s:
87131
requireIPv4PodCIDR: true
132+
requireIPv6PodCIDR: true
88133
hubble:
89134
relay:
90135
enabled: true
91136
ui:
92137
enabled: true
93138
- target:
94-
group: controlplane.cluster.x-k8s.io
95-
version: v1beta2
96-
kind: KThreesControlPlane
97-
patch: |-
98-
- op: replace
99-
path: /spec/kthreesConfigSpec/preK3sCommands
100-
value:
101-
- |
102-
mkdir -p /etc/rancher/k3s/config.yaml.d/
103-
echo "node-ip: $(ip a s eth0 |grep -E 'inet ' |cut -d' ' -f6|cut -d/ -f1 | grep -E '192.168'),$(ip a s eth0 |grep -E 'inet6 ' |cut -d' ' -f6|cut -d/ -f1 | grep -vE 'fe80')" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
104-
- sed -i '/swap/d' /etc/fstab
105-
- swapoff -a
106-
- hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname
107-
- target:
108-
group: bootstrap.cluster.x-k8s.io
109-
version: v1beta2
110-
kind: KThreesConfigTemplate
139+
kind: HelmChartProxy
140+
name: .*-linode-cloud-controller-manager
111141
patch: |-
112142
- op: replace
113-
path: /spec/template/spec/preK3sCommands
114-
value:
115-
- |
116-
mkdir -p /etc/rancher/k3s/config.yaml.d/
117-
echo "node-ip: $(ip a s eth0 |grep -E 'inet ' |cut -d' ' -f6|cut -d/ -f1 | grep -E '192.168'),$(ip a s eth0 |grep -E 'inet6 ' |cut -d' ' -f6|cut -d/ -f1 | grep -vE 'fe80')" >> /etc/rancher/k3s/config.yaml.d/capi-config.yaml
118-
- sed -i '/swap/d' /etc/fstab
119-
- swapoff -a
120-
- hostnamectl set-hostname '{{ ds.meta_data.label }}' && hostname -F /etc/hostname
143+
path: /spec/valuesTemplate
144+
value: |
145+
routeController:
146+
vpcNames: {{ .InfraCluster.spec.vpcRef.name }}
147+
clusterCIDR: ${VPC_NETWORK_CIDR:=10.192.0.0/10}
148+
configureCloudRoutes: true
149+
secretRef:
150+
name: "linode-token-region"
151+
image:
152+
tag: v0.9.0
153+
pullPolicy: IfNotPresent
154+
enableNodeIPAM: true
155+
tolerations:
156+
# The CCM can run on Nodes tainted as masters
157+
- key: "node-role.kubernetes.io/control-plane"
158+
effect: "NoSchedule"
159+
# The CCM is a "critical addon"
160+
- key: "CriticalAddonsOnly"
161+
operator: "Exists"
162+
# This taint is set on all Nodes when an external CCM is used
163+
- key: node.cloudprovider.kubernetes.io/uninitialized
164+
value: "true"
165+
effect: NoSchedule
166+
- key: node.kubernetes.io/not-ready
167+
operator: Exists
168+
effect: NoSchedule
169+
- key: node.kubernetes.io/unreachable
170+
operator: Exists
171+
effect: NoSchedule
172+
- key: node.cilium.io/agent-not-ready
173+
operator: Exists
174+
effect: NoSchedule
175+
env:
176+
- name: LINODE_EXTERNAL_SUBNET
177+
value: ${LINODE_EXTERNAL_SUBNET:=""}
178+
- name: LINODE_URL
179+
value: ${LINODE_URL:="https://api.linode.com"}
180+
- name: SSL_CERT_DIR
181+
value: "/tls"
182+
- name: LINODE_API_VERSION
183+
value: v4beta
184+
- name: KUBERNETES_SERVICE_HOST
185+
value: "{{ .InfraCluster.spec.controlPlaneEndpoint.host }}"
186+
- name: KUBERNETES_SERVICE_PORT
187+
value: "{{ .InfraCluster.spec.controlPlaneEndpoint.port }}"
188+
volumeMounts:
189+
- name: cacert
190+
mountPath: /tls
191+
readOnly: true
192+
volumes:
193+
- name: cacert
194+
secret:
195+
secretName: linode-ca
196+
defaultMode: 420

0 commit comments

Comments (0)