Remove deprecated Argo CD application configurations and related files for noble cluster, including root-application.yaml, kustomization.yaml, and individual application manifests for argocd, cilium, longhorn, kube-vip, and monitoring components. Update kube-vip daemonset.yaml to enhance deployment strategy and environment variables for improved configuration.

This commit is contained in:
Nikholas Pcenicni
2026-03-27 23:02:17 -04:00
parent 4263da65d8
commit d2c53fc553
37 changed files with 778 additions and 1042 deletions

View File

@@ -1,56 +1,92 @@
# NOTE(review): this span appears to be a scraped commit-diff rendering with the
# +/- line markers stripped — OLD and NEW versions of the file are interleaved,
# and the original indentation may have been flattened by the scrape. As-is this
# is NOT a valid standalone YAML document (duplicate top-level keys, orphaned
# sequence items — each flagged below). Recover the real talconfig.yaml from the
# repository at the commit rather than parsing this text.
# yaml-language-server: $schema=../talconfig.json
# Restore target after GPT wipe: `cp talconfig.with-longhorn.yaml talconfig.yaml` then `talhelper genconfig -o out` and apply all nodes.
# Noble lab — Talos machine configs via talhelper.
# 1) talhelper gensecret > talsecret.yaml # or SOPS-encrypt to talsecret.sops.yaml (do not commit)
# 2) talhelper genconfig -o out # writes to talos/out/ (gitignored from repo root)
# 3) talosctl apply-config --insecure -n <ip> --file out/noble-<host>.yaml
#
# installDisk: confirm with `talosctl disks -n <ip> --insecure` (Proxmox virtio is often /dev/sda).
# Longhorn data disk: second disk (often /dev/sdb SCSI or /dev/vdb virtio) → XFS at /var/mnt/longhorn.
# After changing schematic/extensions: regenerate configs, upgrade nodes with new installer image, then reboot if needed.
# Helm must set defaultDataPath to /var/mnt/longhorn (see clusters/noble/apps/longhorn/values.yaml).
#
# Image Factory schematic (iscsi-tools + util-linux-tools), nocloud installer:
# factory.talos.dev/nocloud-installer/249d9135de54962744e917cfe654117000cba369f9152fbab9d055a00aa3664f:v1.12.6
# After edits, run talhelper genconfig — `machine.install.image` in out/*.yaml should match this schematic (path may be metal-installer/ etc. on bare metal).
# Upgrade: talosctl upgrade --image <same-as-machine.install.image-in-out> -n <node-ip>
clusterName: noble
# NOTE(review): duplicate 'talosVersion' key — v1.12.6 here vs v1.12.5 two lines
# down. This is a diff old/new pair, not intent; most YAML parsers would silently
# take the last value (v1.12.5). Presumably v1.12.6 is the NEW side (it matches
# the installer tag in the header comment) — confirm against the repo.
talosVersion: v1.12.6
endpoint: https://192.168.50.230:6443
talosVersion: v1.12.5
kubernetesVersion: v1.31.1
allowSchedulingOnControlPlanes: true
# kube-vip fronts the Kubernetes API at this IP (see clusters/noble/apps/kube-vip).
# Without these SANs, TLS to https://192.168.50.230:6443 fails (cert does not match).
# Talos API (talosctl -e) also uses endpoint; include VIP in machine cert SANs.
additionalApiServerCertSans:
- 192.168.50.230
- kube.noble.lab.pcenicni.dev
additionalMachineCertSans:
- 192.168.50.230
# NOTE(review): 'cniConfig: name: none' here and the inline patch near the end
# of the file ('cluster.network.cni.name: none') express the same setting — one
# is likely the OLD side of the diff and the other the NEW; confirm which form
# the current talhelper schema expects before reusing either.
# Use Cilium installed via GitOps (no bundled Talos CNI).
cniConfig:
name: none
clusterPodNets:
- 10.244.0.0/16
clusterSvcNets:
- 10.96.0.0/12
# Secondary disk on every node (OS stays on installDisk: /dev/sda).
# Mount matches Longhorn defaultDataPath in clusters/noble/apps/longhorn/application.yaml.
# NOTE(review): 'patches' also appears as a top-level key again near the end of
# the file — a duplicate-key diff artifact. This machine.disks patch is likely
# the OLD mechanism, superseded by the 'userVolumes' blocks below; verify.
patches:
- |-
machine:
disks:
- device: /dev/sdb
partitions:
- mountpoint: /var/mnt/longhorn
# NOTE(review): the two list items below are orphaned — they do not belong to
# the 'patches' sequence above. They look like displaced cert-SAN entries
# (compare additionalApiServerCertSans earlier); the diff merge detached them.
- noble.lab
- kube.noble.lab
# NOTE(review): two node lists are interleaved below — OLD (noble-cp-1/2/3,
# noble-worker-1) and NEW (helium, neon, argon, krypton) — reusing the same IPs
# (.20/.30/.40 each appear twice). Only one set can be the real file.
nodes:
- hostname: noble-cp-1
ipAddress: 192.168.50.20
controlPlane: true
installDisk: /dev/sda
- hostname: noble-cp-2
ipAddress: 192.168.50.30
controlPlane: true
installDisk: /dev/sda
- hostname: noble-cp-3
ipAddress: 192.168.50.40
controlPlane: true
installDisk: /dev/sda
# NOTE(review): orphaned entry — 'noble-worker-1' has no properties of its own;
# its body was likely replaced by the 'helium' entry that follows (same diff hunk).
- hostname: noble-worker-1
- hostname: helium
ipAddress: 192.168.50.10
controlPlane: false
installDisk: /dev/sda
# Anchor '&noble-installer' is aliased by the three control-plane nodes below.
talosImageURL: &noble-installer factory.talos.dev/nocloud-installer/249d9135de54962744e917cfe654117000cba369f9152fbab9d055a00aa3664f
- hostname: neon
ipAddress: 192.168.50.20
controlPlane: true
installDisk: /dev/sda
talosImageURL: *noble-installer
- hostname: argon
ipAddress: 192.168.50.30
controlPlane: true
installDisk: /dev/sda
talosImageURL: *noble-installer
- hostname: krypton
ipAddress: 192.168.50.40
controlPlane: true
installDisk: /dev/sda
talosImageURL: *noble-installer
# Anchors '&noble-schematic' and '&longhorn-data' defined under controlPlane are
# aliased by the 'worker' section below to avoid duplicating the extension list
# and the Longhorn data-volume definition.
controlPlane:
schematic: &noble-schematic
customization:
systemExtensions:
officialExtensions:
- siderolabs/iscsi-tools
- siderolabs/util-linux-tools
userVolumes:
- &longhorn-data
name: longhorn
# Whole dedicated disk (no partition min/max math). Avoids "not enough space" when
# grow+maxSize:100% on a separate data disk incorrectly fails provisioning.
volumeType: disk
provisioning:
diskSelector:
# Proxmox virtio SCSI: second disk is often vdb, not sdb. Prefer WWN/serial in prod.
match: disk.dev_path == '/dev/sdb' || disk.dev_path == '/dev/vdb'
filesystem:
type: xfs
worker:
schematic: *noble-schematic
userVolumes:
- *longhorn-data
# NOTE(review): second top-level 'patches' key (first one appears above, before
# 'nodes') — invalid as a single document; a lenient parser keeps only this one.
# This block (cni none + kubelet bind mounts for Longhorn) is presumably the NEW
# side of the diff — confirm against the repo before reuse.
patches:
- |-
cluster:
network:
cni:
name: none
machine:
kubelet:
extraMounts:
- destination: /var/mnt/longhorn
type: bind
source: /var/mnt/longhorn
options:
- bind
- rshared
- rw
# Chart DaemonSet hostPath is /var/lib/longhorn (not configurable in Helm 1.11.x).
- destination: /var/lib/longhorn
type: bind
source: /var/mnt/longhorn
options:
- bind
- rshared
- rw