# kube-vip cluster template
The kube-vip cluster template creates a static pod running kube-vip on each control plane node. The control plane endpoint is the VIP address managed by kube-vip.
## Requirements

- A free IP address in the workload cluster network, to use as the VIP (see the check below).
- The management cluster must be able to reach the VIP address, so that it can connect to the workload cluster API server.
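
A quick way to confirm that the chosen VIP is actually free (using the example address `10.0.42.1` from the configuration below):

```bash
# The VIP must not answer before the cluster exists; a reply means the address is taken
ping -c 3 -W 1 10.0.42.1 && echo "WARNING: 10.0.42.1 is already in use"
```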
## Configuration

```bash
# Cluster version and size
export KUBERNETES_VERSION=v1.32.3
export CONTROL_PLANE_MACHINE_COUNT=1
export WORKER_MACHINE_COUNT=1

# Name of secret with server credentials
export LXC_SECRET_NAME=lxc-secret

## Kubernetes image to use (if using a custom image)
#export LXC_IMAGE_NAME=kubeadm/v1.31.4/ubuntu/24.04

# Load balancer configuration
export LXC_LOAD_BALANCER_ADDRESS=10.0.42.1       # free IP address to use for the kube-vip VIP
export LXC_LOAD_BALANCER_INTERFACE=              # (optional) interface to bind the VIP on

# Control plane machine configuration
export CONTROL_PLANE_MACHINE_TYPE=container      # 'container' or 'virtual-machine'
export CONTROL_PLANE_MACHINE_FLAVOR=c2-m4        # instance type for control plane nodes
export CONTROL_PLANE_MACHINE_PROFILES=[default]  # profiles for control plane nodes
export CONTROL_PLANE_MACHINE_DEVICES=[]          # override devices for control plane nodes

# Worker machine configuration
export WORKER_MACHINE_TYPE=container             # 'container' or 'virtual-machine'
export WORKER_MACHINE_FLAVOR=c2-m4               # instance type for worker nodes
export WORKER_MACHINE_PROFILES=[default]         # profiles for worker nodes
export WORKER_MACHINE_DEVICES=[]                 # override devices for worker nodes
```
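
Before generating the cluster, clusterctl can list the variables this flavor consumes, which is a handy way to confirm that nothing required was left unset:

```bash
# Show all variables (and defaults) used by the kube-vip flavor
clusterctl generate cluster example-cluster -i incus --flavor kube-vip --list-variables
```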
## Generate cluster

```bash
clusterctl generate cluster example-cluster -i incus --flavor kube-vip
```
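
The output is a plain manifest, so a common pattern is to save it to a file and apply it on the management cluster (the name `example-cluster` here is only an example):

```bash
clusterctl generate cluster example-cluster -i incus --flavor kube-vip > example-cluster.yaml
kubectl apply -f example-cluster.yaml
```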
## Cluster Template

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ${POD_CIDR:=[10.244.0.0/16]}
    services:
      cidrBlocks: ${SERVICE_CIDR:=[10.96.0.0/12]}
    serviceDomain: cluster.local
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: LXCCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCCluster
metadata:
  name: ${CLUSTER_NAME}
spec:
  secretRef:
    name: ${LXC_SECRET_NAME}
  controlPlaneEndpoint:
    host: ${LXC_LOAD_BALANCER_ADDRESS}
    port: 6443
  loadBalancer:
    external: {}
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  version: ${KUBERNETES_VERSION}
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
      kind: LXCMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  kubeadmConfigSpec:
    preKubeadmCommands:
    - set -x
    # Workaround for https://github.com/kube-vip/kube-vip/issues/684, see https://github.com/kube-vip/kube-vip/issues/684#issuecomment-1883955927
    - |
      if [ -f /run/kubeadm/kubeadm.yaml ]; then
        sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml
      fi
    # Workaround for kube-proxy failing to configure nf_conntrack_max_per_core on LXC
    - |
      if systemd-detect-virt -c -q 2>/dev/null; then
        cat /run/kubeadm/hack-kube-proxy-config-lxc.yaml | tee -a /run/kubeadm/kubeadm.yaml
      fi
    # # Workaround for https://github.com/kube-vip/kube-vip/issues/684, see https://github.com/kube-vip/kube-vip/issues/684#issuecomment-1883955927
    # # This reverts the previous change. It is disabled as it restarts kube-vip and causes flakiness during cluster setup
    # postKubeadmCommands:
    # - |
    #   if [ -f /run/kubeadm/kubeadm.yaml ]; then
    #     sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' /etc/kubernetes/manifests/kube-vip.yaml
    #   fi
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
          fail-swap-on: "false"
          provider-id: "lxc:///{{ v1.local_hostname }}"
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          creationTimestamp: null
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - args:
            - manager
            env:
            - name: vip_arp
              value: "true"
            - name: port
              value: "6443"
            - name: vip_interface
              value: ${LXC_LOAD_BALANCER_INTERFACE:=""}
            - name: vip_cidr
              value: "32"
            - name: cp_enable
              value: "true"
            - name: cp_namespace
              value: kube-system
            - name: vip_ddns
              value: "false"
            - name: svc_enable
              value: "true"
            - name: svc_leasename
              value: plndr-svcs-lock
            - name: svc_election
              value: "true"
            - name: vip_leaderelection
              value: "true"
            - name: vip_leasename
              value: plndr-cp-lock
            - name: vip_leaseduration
              value: "15"
            - name: vip_renewdeadline
              value: "10"
            - name: vip_retryperiod
              value: "2"
            - name: address
              value: ${LXC_LOAD_BALANCER_ADDRESS}
            - name: prometheus_server
              value: :2112
            image: ghcr.io/kube-vip/kube-vip:v0.6.4
            imagePullPolicy: IfNotPresent
            name: kube-vip
            resources: {}
            securityContext:
              capabilities:
                add:
                - NET_ADMIN
                - NET_RAW
            volumeMounts:
            - mountPath: /etc/kubernetes/admin.conf
              name: kubeconfig
            - mountPath: /etc/hosts
              name: etchosts
          hostNetwork: true
          volumes:
          - hostPath:
              path: /etc/kubernetes/admin.conf
            name: kubeconfig
          - hostPath:
              path: /etc/kube-vip.hosts
              type: File
            name: etchosts
        status: {}
      owner: root:root
      path: /etc/kubernetes/manifests/kube-vip.yaml
      permissions: "0644"
    - content: 127.0.0.1 localhost kubernetes
      owner: root:root
      path: /etc/kube-vip.hosts
      permissions: "0644"
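    # KubeProxyConfiguration override appended to the generated kubeadm config by the
    # preKubeadmCommands above (only when running inside an LXC container)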
    - path: /run/kubeadm/hack-kube-proxy-config-lxc.yaml
      content: |
        ---
        kind: KubeProxyConfiguration
        apiVersion: kubeproxy.config.k8s.io/v1alpha1
        conntrack:
          maxPerCore: 0
      owner: root:root
      permissions: "0444"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  template:
    spec:
      instanceType: ${CONTROL_PLANE_MACHINE_TYPE}
      flavor: ${CONTROL_PLANE_MACHINE_FLAVOR}
      profiles: ${CONTROL_PLANE_MACHINE_PROFILES:=[default]}
      devices: ${CONTROL_PLANE_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
  template:
    spec:
      version: ${KUBERNETES_VERSION}
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
        kind: LXCMachineTemplate
        name: ${CLUSTER_NAME}-md-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LXCMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      instanceType: ${WORKER_MACHINE_TYPE}
      flavor: ${WORKER_MACHINE_FLAVOR}
      profiles: ${WORKER_MACHINE_PROFILES:=[default]}
      devices: ${WORKER_MACHINE_DEVICES:=[]}
      image:
        name: ${LXC_IMAGE_NAME:=""}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
            fail-swap-on: "false"
            provider-id: "lxc:///{{ v1.local_hostname }}"
```
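
Once the cluster is provisioned, the workload cluster API server should be reachable through the VIP. A minimal smoke test, assuming the `example-cluster` name from above:

```bash
# Fetch the workload cluster kubeconfig; its server URL points at the VIP
clusterctl get kubeconfig example-cluster > kubeconfig

# The API server is reached through the VIP address managed by kube-vip
kubectl --kubeconfig kubeconfig get nodes

# kube-vip runs as a static pod on every control plane node
kubectl --kubeconfig kubeconfig get pods -n kube-system -o wide | grep kube-vip
```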