# OVN network load balancer cluster template
This cluster template provisions an OVN network load balancer that forwards traffic to the control plane machines of the cluster. The control plane endpoint is the listen IP address of the network load balancer.
## Requirements
- Incus configured with OVN.
- A free IP address in the OVN uplink network.
- The management cluster can reach the OVN uplink network (so that it can connect to the workload cluster).
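To sanity-check these requirements from the Incus side before creating the cluster, the networks can be inspected with the Incus CLI. This is a minimal sketch; `UPLINK` and `ovn0` are placeholder network names for your environment, and the exact configuration keys depend on how the uplink was set up.

```bash
# Confirm that an OVN network and its uplink exist (see the TYPE column).
incus network list

# Inspect the uplink network; with a typical OVN setup the load balancer
# listen address must be a free IP from the range reserved for OVN
# (often the ipv4.ovn.ranges key). "UPLINK" is a placeholder name.
incus network show UPLINK

# Inspect the OVN network that the cluster instances will be attached to.
# "ovn0" is a placeholder name.
incus network show ovn0
```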
## Configuration
NOTE: Make sure that the instance profiles use the OVN network for instance networking.
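As an illustration of that note (not part of the template), a profile can be attached to the OVN network by giving it a NIC device on that network. The profile name `default` and network name `ovn0` below are assumptions; adjust them to your setup.

```bash
# Point the profile's NIC at the OVN network so instances created from it
# get their networking from OVN. "default" and "ovn0" are placeholder names.
incus profile device remove default eth0 || true
incus profile device add default eth0 nic network=ovn0 name=eth0

# Review the resulting profile.
incus profile show default
```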
```bash
# Cluster version and size
export KUBERNETES_VERSION=v1.32.3
export CONTROL_PLANE_MACHINE_COUNT=1
export WORKER_MACHINE_COUNT=1

# Name of secret with server credentials
export LXC_SECRET_NAME=lxc-secret

## Kubernetes image to use (if using a custom image)
#export LXC_IMAGE_NAME=kubeadm/v1.31.4/ubuntu/24.04

# Load balancer configuration
export LXC_LOAD_BALANCER_ADDRESS=10.100.42.1    # free IP address in the OVN uplink network
export LXC_LOAD_BALANCER_NETWORK=ovn0           # name of the OVN network used by the instances

# Control plane machine configuration
export CONTROL_PLANE_MACHINE_TYPE=container     # 'container' or 'virtual-machine'
export CONTROL_PLANE_MACHINE_FLAVOR=c2-m4       # instance type for control plane nodes
export CONTROL_PLANE_MACHINE_PROFILES=[default] # profiles for control plane nodes
export CONTROL_PLANE_MACHINE_DEVICES=[]         # override devices for control plane nodes

# Worker machine configuration
export WORKER_MACHINE_TYPE=container            # 'container' or 'virtual-machine'
export WORKER_MACHINE_FLAVOR=c2-m4              # instance type for worker nodes
export WORKER_MACHINE_PROFILES=[default]        # profiles for worker nodes
export WORKER_MACHINE_DEVICES=[]                # override devices for worker nodes
```
## Generate cluster
```bash
clusterctl generate cluster example-cluster -i incus --flavor ovn
```
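The command prints the rendered manifests to stdout; they still need to be applied to the management cluster. A minimal usage sketch, assuming the cluster name `example-cluster` from above:

```bash
# Render the template using the exported variables and apply it.
clusterctl generate cluster example-cluster -i incus --flavor ovn > example-cluster.yaml
kubectl apply -f example-cluster.yaml

# Watch the machines and control plane come up.
clusterctl describe cluster example-cluster
kubectl get cluster,machines
```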
## Cluster Template
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ${POD_CIDR:=[10.244.0.0/16]}
    services:
      cidrBlocks: ${SERVICE_CIDR:=[10.96.0.0/12]}
    serviceDomain: cluster.local
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: LXCCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCCluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  secretRef:
    name: ${LXC_SECRET_NAME}
  controlPlaneEndpoint:
    host: ${LXC_LOAD_BALANCER_ADDRESS}
    port: 6443
  loadBalancer:
    ovn:
      networkName: ${LXC_LOAD_BALANCER_NETWORK}
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  version: ${KUBERNETES_VERSION}
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
      kind: LXCMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  kubeadmConfigSpec:
    preKubeadmCommands:
      - set -x
      # Workaround for kube-proxy failing to configure nf_conntrack_max_per_core on LXC
      - |
        if systemd-detect-virt -c -q 2>/dev/null; then
          cat /run/kubeadm/hack-kube-proxy-config-lxc.yaml | tee -a /run/kubeadm/kubeadm.yaml
        fi
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    files:
      - path: /run/kubeadm/hack-kube-proxy-config-lxc.yaml
        content: |
          ---
          kind: KubeProxyConfiguration
          apiVersion: kubeproxy.config.k8s.io/v1alpha1
          conntrack:
            maxPerCore: 0
        owner: root:root
        permissions: "0444"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  template:
    spec:
      instanceType: ${CONTROL_PLANE_MACHINE_TYPE}
      flavor: ${CONTROL_PLANE_MACHINE_FLAVOR}
      profiles: ${CONTROL_PLANE_MACHINE_PROFILES:=[default]}
      devices: ${CONTROL_PLANE_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
  template:
    spec:
      version: ${KUBERNETES_VERSION}
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
        kind: LXCMachineTemplate
        name: ${CLUSTER_NAME}-md-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      instanceType: ${WORKER_MACHINE_TYPE}
      flavor: ${WORKER_MACHINE_FLAVOR}
      profiles: ${WORKER_MACHINE_PROFILES:=[default]}
      devices: ${WORKER_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
            fail-swap-on: "false"
            provider-id: "lxc:///{{ v1.local_hostname }}"
```
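Once the cluster is provisioned, the load balancer and the workload cluster can be checked. A hedged sketch, assuming the example values used above (`ovn0`, `example-cluster`):

```bash
# The OVN network load balancer is created on the instances' OVN network;
# its listen address should match LXC_LOAD_BALANCER_ADDRESS.
incus network load-balancer list ovn0

# The control plane endpoint should serve the workload cluster API.
clusterctl get kubeconfig example-cluster > example-cluster.kubeconfig
kubectl --kubeconfig example-cluster.kubeconfig get nodes
```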