# Development cluster template
The development cluster template creates an LXC or OCI container running a haproxy server, which acts as the load balancer endpoint for the cluster. The load balancer endpoint is the IP address of the haproxy container.

> **WARNING**: The load balancer container is a single point of failure for the control plane of the workload cluster, and should therefore only be used for development or evaluation purposes.
## Requirements
- The instance network must be reachable by the management controller (a quick check is sketched below).
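
One way to verify this is to ping an existing instance from wherever the management controller runs. This is a minimal sketch; the instance name `example` is a hypothetical placeholder, and it assumes the `incus` CLI is configured against the same server:

```bash
# Hypothetical reachability check, assuming an existing instance named 'example'.
# Column '4' is the instance IPv4 address; csv output looks like '10.0.0.5 (eth0)'.
IP="$(incus list example --format csv -c 4 | cut -d' ' -f1)"
ping -c 1 "$IP"
```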
## Configuration
```bash
# Cluster version and size
export KUBERNETES_VERSION=v1.32.3
export CONTROL_PLANE_MACHINE_COUNT=1
export WORKER_MACHINE_COUNT=1

# Name of secret with server credentials
export LXC_SECRET_NAME=lxc-secret

# Kubernetes image to use (if using a custom image)
#export LXC_IMAGE_NAME=kubeadm/v1.31.4/ubuntu/24.04

# Load balancer configuration
export LXC_LOAD_BALANCER_TYPE=lxc                 # must be 'lxc' or 'oci'
export LOAD_BALANCER_MACHINE_PROFILES=[default]   # profiles for the lb container
export LOAD_BALANCER_MACHINE_FLAVOR=c1-m1         # instance type for the lb container

# Control plane machine configuration
export CONTROL_PLANE_MACHINE_TYPE=container       # 'container' or 'virtual-machine'
export CONTROL_PLANE_MACHINE_FLAVOR=c2-m4         # instance type for control plane nodes
export CONTROL_PLANE_MACHINE_PROFILES=[default]   # profiles for control plane nodes
export CONTROL_PLANE_MACHINE_DEVICES=[]           # override devices for control plane nodes

# Worker machine configuration
export WORKER_MACHINE_TYPE=container              # 'container' or 'virtual-machine'
export WORKER_MACHINE_FLAVOR=c2-m4                # instance type for worker nodes
export WORKER_MACHINE_PROFILES=[default]          # profiles for worker nodes
export WORKER_MACHINE_DEVICES=[]                  # override devices for worker nodes
```
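
Any of these defaults can be overridden before generating the cluster. As a sketch, the following runs control plane nodes as virtual machines with an extra profile; the `vm-profile` name is a hypothetical example, not a profile shipped by the provider:

```bash
# Hypothetical override: control plane nodes as virtual machines,
# with an additional (made-up) 'vm-profile' profile.
export CONTROL_PLANE_MACHINE_TYPE=virtual-machine
export CONTROL_PLANE_MACHINE_PROFILES=[default,vm-profile]
```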
## Generate cluster
```bash
clusterctl generate cluster example-cluster -i incus --flavor development
```
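
The command prints the rendered manifests to standard output. A typical follow-up, assuming a configured management cluster, is to write them to a file and apply them (the file name `cluster.yaml` is arbitrary):

```bash
# Render the template and apply it to the management cluster.
clusterctl generate cluster example-cluster -i incus --flavor development > cluster.yaml
kubectl apply -f cluster.yaml
```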
## Cluster Template
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ${POD_CIDR:=[10.244.0.0/16]}
    services:
      cidrBlocks: ${SERVICE_CIDR:=[10.96.0.0/12]}
    serviceDomain: cluster.local
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: LXCCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCCluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  secretRef:
    name: ${LXC_SECRET_NAME}
  loadBalancer:
    ${LXC_LOAD_BALANCER_TYPE:=lxc}:
      instanceSpec:
        flavor: ${LOAD_BALANCER_MACHINE_FLAVOR:=""}
        profiles: ${LOAD_BALANCER_MACHINE_PROFILES:=[default]}
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  version: ${KUBERNETES_VERSION}
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
      kind: LXCMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  kubeadmConfigSpec:
    preKubeadmCommands:
      - set -x
      # Workaround for kube-proxy failing to configure nf_conntrack_max_per_core on LXC
      - |
        if systemd-detect-virt -c -q 2>/dev/null; then
          cat /run/kubeadm/hack-kube-proxy-config-lxc.yaml | tee -a /run/kubeadm/kubeadm.yaml
        fi
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    files:
      - path: /run/kubeadm/hack-kube-proxy-config-lxc.yaml
        content: |
          ---
          kind: KubeProxyConfiguration
          apiVersion: kubeproxy.config.k8s.io/v1alpha1
          conntrack:
            maxPerCore: 0
        owner: root:root
        permissions: "0444"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  template:
    spec:
      instanceType: ${CONTROL_PLANE_MACHINE_TYPE}
      flavor: ${CONTROL_PLANE_MACHINE_FLAVOR}
      profiles: ${CONTROL_PLANE_MACHINE_PROFILES:=[default]}
      devices: ${CONTROL_PLANE_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
  template:
    spec:
      version: ${KUBERNETES_VERSION}
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
        kind: LXCMachineTemplate
        name: ${CLUSTER_NAME}-md-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      instanceType: ${WORKER_MACHINE_TYPE}
      flavor: ${WORKER_MACHINE_FLAVOR}
      profiles: ${WORKER_MACHINE_PROFILES:=[default]}
      devices: ${WORKER_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
            fail-swap-on: "false"
            provider-id: "lxc:///{{ v1.local_hostname }}"
```
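
Once the cluster is provisioned, the workload cluster kubeconfig can be retrieved with the standard clusterctl flow, reusing the `example-cluster` name from above. Note that nodes typically stay `NotReady` until a CNI is deployed:

```bash
# Fetch the workload cluster kubeconfig and check that the nodes have registered.
clusterctl get kubeconfig example-cluster > kubeconfig
kubectl --kubeconfig ./kubeconfig get nodes
```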