Ubuntu cluster template

The Ubuntu cluster template is identical to the development cluster template, except that it runs on stock upstream Ubuntu 24.04 instances and installs kubeadm on each node during cloud-init.

WARNING: The load balancer container is a single point of failure for the control plane of the workload cluster, and should therefore only be used for development or evaluation purposes.

WARNING: cloud-init will download all binaries on every node while deploying the cluster. This is wasteful and takes longer than using a pre-built base image.

Requirements

  1. The management controller can reach the network of the instances.
  2. Instances can reach GitHub to pull binaries and install kubeadm (see the check below).
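
To verify the second requirement, you can launch a throwaway instance and fetch a page from GitHub. This sketch uses the lxc CLI and a placeholder instance name; the incus CLI is equivalent:

# Placeholder instance name; Ubuntu cloud images ship with curl.
lxc launch ubuntu:24.04 net-test
lxc exec net-test -- curl -fsSL -o /dev/null https://github.com && echo "GitHub reachable"
lxc delete --force net-test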

Configuration

# Cluster version and size
export KUBERNETES_VERSION=v1.32.3
export CONTROL_PLANE_MACHINE_COUNT=1
export WORKER_MACHINE_COUNT=1

# Name of the secret with server credentials (see the example after this block)
export LXC_SECRET_NAME=lxc-secret

# Ubuntu image to use. You can use `ubuntu:VERSION`, which resolves to:
# - Incus:  Image `ubuntu/VERSION/cloud` from https://images.linuxcontainers.org
# - LXD:    Image `VERSION` from https://cloud-images.ubuntu.com/releases
export LXC_IMAGE_NAME="ubuntu:24.04"

# Load balancer configuration
export LXC_LOAD_BALANCER_TYPE=lxc               # 'lxc' or 'oci'
export LOAD_BALANCER_MACHINE_PROFILES=[default] # profiles for the lb container
export LOAD_BALANCER_MACHINE_FLAVOR=c1-m1       # instance type for the lb container

# Control plane machine configuration
export CONTROL_PLANE_MACHINE_TYPE=container     # 'container' or 'virtual-machine'
export CONTROL_PLANE_MACHINE_FLAVOR=c2-m4       # instance type for control plane nodes
export CONTROL_PLANE_MACHINE_PROFILES=[default] # profiles for control plane nodes
export CONTROL_PLANE_MACHINE_DEVICES=[]         # override devices for control plane nodes

# Worker machine configuration
export WORKER_MACHINE_TYPE=container            # 'container' or 'virtual-machine'
export WORKER_MACHINE_FLAVOR=c2-m4              # instance type for worker nodes
export WORKER_MACHINE_PROFILES=[default]        # profiles for worker nodes
export WORKER_MACHINE_DEVICES=[]                # override devices for worker nodes
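
The secret named by LXC_SECRET_NAME holds the credentials the infrastructure provider uses to reach the Incus or LXD server. The key names below are illustrative assumptions, not the authoritative format; consult the provider's credentials documentation and adjust accordingly:

# Hypothetical sketch: the server URL and TLS client certificate keys are
# assumptions; match the key names to the provider's documented format.
kubectl create secret generic lxc-secret \
  --from-literal=server=https://10.0.1.1:8443 \
  --from-file=client-crt=client.crt \
  --from-file=client-key=client.key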

Generate cluster

clusterctl generate cluster example-cluster -i incus --flavor ubuntu
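
You can write the generated manifest to a file for review before applying it to the management cluster:

clusterctl generate cluster example-cluster -i incus --flavor ubuntu > cluster.yaml
kubectl apply -f cluster.yaml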

Cluster Template

apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ${POD_CIDR:=[10.244.0.0/16]}
    services:
      cidrBlocks: ${SERVICE_CIDR:=[10.96.0.0/12]}
    serviceDomain: cluster.local
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: LXCCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCCluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  secretRef:
    name: ${LXC_SECRET_NAME}
  loadBalancer:
    lxc:
      instanceSpec:
        flavor: ${LOAD_BALANCER_MACHINE_FLAVOR:=""}
        profiles: ${LOAD_BALANCER_MACHINE_PROFILES:=[default]}
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  version: ${KUBERNETES_VERSION}
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
      kind: LXCMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  kubeadmConfigSpec:
    preKubeadmCommands:
    - set -x
    # Workaround for kube-proxy failing to configure nf_conntrack_max_per_core on LXC
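    # (kube-proxy running in a container cannot write the nf_conntrack
    # sysctls; when systemd-detect-virt reports a container, the
    # KubeProxyConfiguration from the file below is appended, setting
    # maxPerCore to 0 so the host's conntrack limits are left untouched)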
    - |
      if systemd-detect-virt -c -q 2>/dev/null; then
        cat /run/kubeadm/hack-kube-proxy-config-lxc.yaml | tee -a /run/kubeadm/kubeadm.yaml
      fi
    # Install kubeadm
    - sh /opt/cluster-api/install-kubeadm.sh "${KUBERNETES_VERSION}"
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
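          # relaxed eviction thresholds and fail-swap-on=false let the
          # kubelet run in a container that shares the host kernel (and
          # possibly swap); provider-id embeds the instance hostname via
          # cloud-init jinja templating, rendered at boot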
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    files:
    - path: /run/kubeadm/hack-kube-proxy-config-lxc.yaml
      content: |
        ---
        kind: KubeProxyConfiguration
        apiVersion: kubeproxy.config.k8s.io/v1alpha1
        conntrack:
          maxPerCore: 0
      owner: root:root
      permissions: "0444"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  template:
    spec:
      instanceType: ${CONTROL_PLANE_MACHINE_TYPE}
      flavor: ${CONTROL_PLANE_MACHINE_FLAVOR}
      profiles: ${CONTROL_PLANE_MACHINE_PROFILES:=[default]}
      devices: ${CONTROL_PLANE_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
  template:
    spec:
      version: ${KUBERNETES_VERSION}
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
        kind: LXCMachineTemplate
        name: ${CLUSTER_NAME}-md-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      instanceType: ${WORKER_MACHINE_TYPE}
      flavor: ${WORKER_MACHINE_FLAVOR}
      profiles: ${WORKER_MACHINE_PROFILES:=[default]}
      devices: ${WORKER_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      preKubeadmCommands:
      - set -x
      # Install kubeadm
      - sh /opt/cluster-api/install-kubeadm.sh "${KUBERNETES_VERSION}"
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
            fail-swap-on: "false"
            provider-id: "lxc:///{{ v1.local_hostname }}"
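
Once applied, you can follow the rollout and fetch the workload cluster kubeconfig with clusterctl. Note that nodes stay NotReady until a CNI is deployed:

clusterctl describe cluster example-cluster
clusterctl get kubeconfig example-cluster > kubeconfig.yaml
kubectl --kubeconfig kubeconfig.yaml get nodes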