Update .gitignore and refactor Ubuntu template playbook to use role for Proxmox template management

This commit is contained in:
Nikholas Pcenicni
2026-03-27 03:48:32 -04:00
parent 87e71dcd8a
commit 51d55af86e
23 changed files with 676 additions and 65 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

7
.gitignore vendored
View File

@@ -1 +1,6 @@
ansible/inventory/hosts.ini
ansible/inventory/hosts.ini
# Talos generated
talos/out/
# Local secrets
age-key.txt

View File

@@ -2,71 +2,15 @@
---
# Create an Ubuntu cloud-init template on the Proxmox host.
# All of the heavy lifting now lives in the proxmox_template_manage role;
# this playbook only selects the image and storage pool.
- name: Create Ubuntu Cloud-Init Template
  hosts: proxmox
  become: yes
  vars:
    # Override role defaults if needed (see roles/proxmox_template_manage/defaults)
    image_alias: ubuntu-22.04
    storage_pool: local-lvm
  tasks:
    - name: Run Proxmox Template Manage Role
      include_role:
        name: proxmox_template_manage

View File

@@ -0,0 +1,33 @@
---
# Provision a demo VM on Proxmox by cloning the Ubuntu cloud-init template.
# Runs on localhost because it talks to the Proxmox API, not over SSH to the
# node itself. Connection details come from inventory group_vars or -e vars
# (the previous vars_files entry pointing at hosts.ini was removed: vars_files
# must be YAML/JSON variable files, and an INI inventory fails to load there —
# pass the inventory with -i instead).
- name: Hello World Provisioning
  hosts: localhost
  gather_facts: no
  vars:
    # Target Proxmox API details (override from inventory or extra vars)
    proxmox_api_host: "192.168.50.100"
    proxmox_api_user: "root@pam"
    # FIXME(security): plaintext credential committed to the repository.
    # Move this into Ansible Vault or read it from the environment, e.g.:
    #   proxmox_api_password: "{{ lookup('env', 'PROXMOX_API_PASSWORD') }}"
    proxmox_api_password: "Hemroid8"
    proxmox_node: "mercury"
    # VM spec
    vmid: 101
    vm_name: "hello-world-vm"
    template_name: "ubuntu-2204-cloud"
    ci_user: "ubuntu"
    # Replace with your actual public key or pass via -e "ssh_key=..."
    ssh_keys:
      - "{{ ssh_key | default('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI...') }}"
  tasks:
    - name: Run Proxmox Provision Role
      include_role:
        name: proxmox_provision
      vars:
        vmid: "{{ vmid }}"
        vm_name: "{{ vm_name }}"
        template_name: "{{ template_name }}"
        ci_user: "{{ ci_user }}"
        ssh_keys: "{{ ssh_keys }}"

View File

@@ -0,0 +1,23 @@
---
# Defaults for the proxmox_provision role.
# NOTE: proxmox_api_user and proxmox_api_password intentionally have no
# defaults here — the caller must supply them (ideally via Ansible Vault).

# Connection details (fallbacks; ideally inherited from inventory group_vars)
proxmox_api_host: "{{ ansible_host | default(inventory_hostname) }}"
proxmox_node: "{{ inventory_hostname }}"

# VM details
vmid: 0  # 0 lets Proxmox choose the next available ID; set a fixed ID to pin it
vm_name: "new-vm"
template_name: "ubuntu-2204-cloud"
vm_memory: 2048
vm_cores: 2
vm_storage: "local-lvm"
vm_net_bridge: "vmbr0"

# Cloud-init / user data
ci_user: "ubuntu"
# ssh_keys should be a list of public key strings
ssh_keys: []

# Desired VM state after provisioning
vm_state: started

View File

@@ -0,0 +1,39 @@
---
# Clone a VM from an existing template via the Proxmox API and start it.
- name: Provision VM from Template
  community.general.proxmox_kvm:
    api_host: "{{ proxmox_api_host }}"
    api_user: "{{ proxmox_api_user }}"
    api_password: "{{ proxmox_api_password }}"
    # Enable certificate verification if the API host has valid certs.
    validate_certs: false
    node: "{{ proxmox_node }}"
    # vmid 0 (the default) means "let Proxmox pick" — omit the parameter then.
    vmid: "{{ vmid if vmid | int > 0 else omit }}"
    name: "{{ vm_name }}"
    clone: "{{ template_name }}"
    full: true  # full clone (independent disk), not a linked clone
    cores: "{{ vm_cores }}"
    memory: "{{ vm_memory }}"
    storage: "{{ vm_storage }}"
    net:
      net0: "virtio,bridge={{ vm_net_bridge }}"
    # Cloud-init settings
    ciuser: "{{ ci_user }}"
    sshkeys: "{{ ssh_keys | join('\n') }}"
    ipconfig:
      ipconfig0: "ip=dhcp"
    state: "{{ vm_state }}"
  register: vm_provision_result

- name: Debug Provision Result
  debug:
    var: vm_provision_result

# Note: waiting for SSH requires knowing the IP.
# If qemu-guest-agent is installed in the template, we can fetch it.
# Otherwise, we might need a fixed IP or a DNS check.

View File

@@ -0,0 +1,41 @@
---
# Defaults for the proxmox_template_manage role.

# Target Proxmox node (where qm commands run)
proxmox_node: "{{ inventory_hostname }}"

# Storage pool for disks
storage_pool: local-lvm

# Template ID and name
template_id: 9000
template_name: ubuntu-2204-cloud-template

# Hardware specs
template_memory: 2048
template_cores: 2

# Image source selection:
#   'list' — use image_alias to pick an entry from image_list below
#   'url'  — use custom_image_url / custom_image_name
image_source_type: list
image_list:
  ubuntu-22.04:
    url: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
    filename: "ubuntu-22.04-server-cloudimg-amd64.img"
  ubuntu-24.04:
    url: "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
    filename: "ubuntu-24.04-server-cloudimg-amd64.img"
  debian-12:
    url: "https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
    filename: "debian-12-generic-amd64.qcow2"
image_alias: ubuntu-22.04
custom_image_url: ""
custom_image_name: "custom-image.img"

# Cloud-init / SSH
# Optional: embed a default admin key into the template
embed_admin_ssh_key: false
admin_ssh_key: ""

View File

@@ -0,0 +1,90 @@
---
# Build a Proxmox cloud-init template from a cloud image, driven entirely by
# qm CLI commands on the Proxmox node.

- name: Resolve Image Variables (List)
  set_fact:
    _image_url: "{{ image_list[image_alias].url }}"
    _image_name: "{{ image_list[image_alias].filename }}"
  when: image_source_type == 'list'

- name: Resolve Image Variables (URL)
  set_fact:
    _image_url: "{{ custom_image_url }}"
    _image_name: "{{ custom_image_name }}"
  when: image_source_type == 'url'

# qm status exits non-zero when the VMID does not exist; we use the rc only.
- name: Check if template already exists
  command: "qm status {{ template_id }}"
  register: vm_status
  failed_when: false
  changed_when: false

- name: Fail if template ID exists
  fail:
    msg: "VM ID {{ template_id }} already exists. Please delete it or choose a different ID."
  when: vm_status.rc == 0

- name: Download Cloud Image
  get_url:
    url: "{{ _image_url }}"
    dest: "/tmp/{{ _image_name }}"
    mode: '0644'

- name: Install libguestfs-tools (for virt-customize if needed)
  apt:
    name: libguestfs-tools
    state: present
  ignore_errors: yes

# BUGFIX: the qm create flag is --cores, not --core.
- name: Create VM with hardware config
  command: >
    qm create {{ template_id }}
    --name "{{ template_name }}"
    --memory {{ template_memory }}
    --cores {{ template_cores }}
    --net0 virtio,bridge=vmbr0
    --scsihw virtio-scsi-pci
    --ostype l26
    --serial0 socket --vga serial0

- name: Import Disk
  command: "qm importdisk {{ template_id }} /tmp/{{ _image_name }} {{ storage_pool }}"

- name: Attach Disk to SCSI
  command: "qm set {{ template_id }} --scsi0 {{ storage_pool }}:vm-{{ template_id }}-disk-0"

- name: Add Cloud-Init Drive
  command: "qm set {{ template_id }} --ide2 {{ storage_pool }}:cloudinit"

- name: Set Boot Order
  command: "qm set {{ template_id }} --boot c --bootdisk scsi0"

- name: Configure Cloud-Init (Optional Admin Key)
  block:
    - name: Prepare SSH Keys File
      copy:
        content: "{{ admin_ssh_key }}"
        dest: "/tmp/ssh_key_{{ template_id }}.pub"
        mode: '0600'
    - name: Set SSH Keys on Template
      command: "qm set {{ template_id }} --sshkeys /tmp/ssh_key_{{ template_id }}.pub"
    - name: Cleanup Key File
      file:
        path: "/tmp/ssh_key_{{ template_id }}.pub"
        state: absent
  when: embed_admin_ssh_key | bool and admin_ssh_key | length > 0

- name: Set Cloud-Init IP Config (DHCP)
  command: "qm set {{ template_id }} --ipconfig0 ip=dhcp"

# Resize may fail if the image is already >= 10G; treat that as non-fatal.
- name: Resize Disk (to Minimum 10G)
  command: "qm resize {{ template_id }} scsi0 10G"
  ignore_errors: yes

- name: Convert to Template
  command: "qm template {{ template_id }}"

- name: Remove Downloaded Image
  file:
    path: "/tmp/{{ _image_name }}"
    state: absent

View File

@@ -0,0 +1,23 @@
# Argo CD self-management: Argo CD syncs its own install manifests.
# sync-wave -2 ensures it reconciles before the other child apps.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argocd
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "-2"
spec:
  project: default
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  source:
    repoURL: https://gitea.pcenicni.ca/gsdavidp/home-server.git
    targetRevision: HEAD
    path: clusters/noble/bootstrap/argocd
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,53 @@
# Cilium CNI, pinned to chart 1.16.6 with the Helm values required for this
# Talos cluster (API host/port, cgroup mount, IPAM CIDR, capabilities) so
# reconciles do not drift back to chart defaults.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cilium
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "0"
spec:
  project: default
  destination:
    server: https://kubernetes.default.svc
    namespace: kube-system
  sources:
    - repoURL: https://helm.cilium.io/
      chart: cilium
      targetRevision: 1.16.6
      helm:
        valuesObject:
          k8sServiceHost: 192.168.50.20
          k8sServicePort: 6443
          cgroup:
            autoMount:
              enabled: false
            hostRoot: /sys/fs/cgroup
          ipam:
            operator:
              clusterPoolIPv4PodCIDRList:
                - 10.244.0.0/16
          securityContext:
            capabilities:
              ciliumAgent:
                - CHOWN
                - KILL
                - NET_ADMIN
                - NET_RAW
                - IPC_LOCK
                - SYS_ADMIN
                - SYS_RESOURCE
                - DAC_OVERRIDE
                - FOWNER
                - SETGID
                - SETUID
              cleanCiliumState:
                - NET_ADMIN
                - SYS_ADMIN
                - SYS_RESOURCE
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,23 @@
# kube-vip app (API server VIP). sync-wave -1: after Argo CD, before Cilium.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kube-vip
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "-1"
spec:
  project: default
  destination:
    server: https://kubernetes.default.svc
    namespace: kube-system
  source:
    repoURL: https://gitea.pcenicni.ca/gsdavidp/home-server.git
    targetRevision: HEAD
    path: clusters/noble/apps/kube-vip
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - vip-rbac.yaml
  - vip-daemonset.yaml

View File

@@ -0,0 +1,52 @@
# kube-vip DaemonSet: advertises the API VIP 192.168.50.230 in ARP mode on
# control-plane nodes only (hostNetwork, NET_ADMIN/NET_RAW required).
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-vip-ds
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-vip-ds
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kube-vip-ds
    spec:
      hostNetwork: true
      serviceAccountName: kube-vip
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: kube-vip
          image: ghcr.io/kube-vip/kube-vip:v0.8.3
          imagePullPolicy: IfNotPresent
          args:
            - manager
          env:
            - name: vip_arp
              value: "true"
            - name: address
              value: "192.168.50.230"
            - name: port
              value: "6443"
            # NOTE(review): interface is hardcoded — confirm eth0 matches the
            # control-plane NIC name on these Talos nodes.
            - name: vip_interface
              value: "eth0"
            - name: cp_enable
              value: "true"
            - name: svc_enable
              value: "false"
            - name: servicesElection
              value: "false"
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW

View File

@@ -0,0 +1,34 @@
# ServiceAccount + ClusterRole/Binding for kube-vip (leases for leader
# election, services/nodes for VIP management).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-vip
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-vip-role
rules:
  - apiGroups: [""]
    resources: ["services", "services/status", "nodes", "endpoints"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch", "update"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-vip-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-vip-role
subjects:
  - kind: ServiceAccount
    name: kube-vip
    namespace: kube-system

View File

@@ -0,0 +1,7 @@
# Child-app index for the app-of-apps root Application.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - argocd/application.yaml
  - cilium/application.yaml
  - kube-vip/application.yaml

View File

@@ -0,0 +1,7 @@
# Bootstrap Argo CD: namespace plus the pinned upstream install manifest.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argocd
resources:
  - namespace.yaml
  - https://raw.githubusercontent.com/argoproj/argo-cd/v2.14.7/manifests/install.yaml

View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
  name: argocd

View File

@@ -0,0 +1,21 @@
# App-of-apps root: syncs everything under clusters/noble/apps.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: noble-root
  namespace: argocd
spec:
  project: default
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  source:
    repoURL: https://gitea.pcenicni.ca/gsdavidp/home-server.git
    targetRevision: HEAD
    path: clusters/noble/apps
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

112
talos/README.md Normal file
View File

@@ -0,0 +1,112 @@
# Talos deployment (4 nodes)
This directory contains a `talhelper` cluster definition for a 4-node Talos
cluster:
- 3 hybrid control-plane/worker nodes: `noble-cp-1..3`
- 1 worker-only node: `noble-worker-1`
- `allowSchedulingOnControlPlanes: true`
- CNI: `none` (for Cilium via GitOps)
## 1) Update values for your environment
Edit `talconfig.yaml`:
- `endpoint` (Kubernetes API VIP or LB IP)
- each node `ipAddress`
- each node `installDisk` (for example `/dev/sda`, `/dev/nvme0n1`)
- `talosVersion` / `kubernetesVersion` if desired
## 2) Generate cluster secrets and machine configs
From this directory:
```bash
talhelper gensecret > talsecret.sops.yaml
talhelper genconfig
```
Generated machine configs are written to `clusterconfig/`.
## 3) Apply Talos configs
Apply each node file to the matching node IP from `talconfig.yaml`:
```bash
talosctl apply-config --insecure -n 192.168.50.20 -f clusterconfig/noble-noble-cp-1.yaml
talosctl apply-config --insecure -n 192.168.50.30 -f clusterconfig/noble-noble-cp-2.yaml
talosctl apply-config --insecure -n 192.168.50.40 -f clusterconfig/noble-noble-cp-3.yaml
talosctl apply-config --insecure -n 192.168.50.10 -f clusterconfig/noble-noble-worker-1.yaml
```
## 4) Bootstrap the cluster
After all nodes are up (bootstrap once, from any control-plane node):
```bash
talosctl bootstrap -n 192.168.50.20 -e 192.168.50.230
talosctl kubeconfig -n 192.168.50.20 -e 192.168.50.230 .
```
## 5) Validate
```bash
talosctl -n 192.168.50.20 -e 192.168.50.230 health
kubectl get nodes -o wide
```
## 6) GitOps-pinned Cilium values
The Cilium settings that worked for this Talos cluster are now persisted in:
- `clusters/noble/apps/cilium/application.yaml`
That Argo CD `Application` pins chart `1.16.6` and includes the required Helm
values for this environment (API host/port, cgroup settings, IPAM CIDR, and
security capabilities), so future reconciles do not drift back to defaults.
## 7) Argo CD app-of-apps bootstrap
This repo includes an app-of-apps structure for cluster apps:
- Root app: `clusters/noble/root-application.yaml`
- Child apps index: `clusters/noble/apps/kustomization.yaml`
- Argo CD app: `clusters/noble/apps/argocd/application.yaml`
- Cilium app: `clusters/noble/apps/cilium/application.yaml`
Bootstrap once from your workstation:
```bash
kubectl apply -k clusters/noble/bootstrap/argocd
kubectl apply -f clusters/noble/root-application.yaml
```
After this, Argo CD continuously reconciles all applications under
`clusters/noble/apps/`.
## 8) kube-vip API VIP (`192.168.50.230`)
HAProxy has been removed in favor of `kube-vip` running on control-plane nodes.
Manifests are in:
- `clusters/noble/apps/kube-vip/application.yaml`
- `clusters/noble/apps/kube-vip/vip-rbac.yaml`
- `clusters/noble/apps/kube-vip/vip-daemonset.yaml`
The DaemonSet advertises `192.168.50.230` in ARP mode and fronts the Kubernetes
API on port `6443`.
Apply manually (or let Argo CD sync from root app):
```bash
kubectl apply -k clusters/noble/apps/kube-vip
```
Validate:
```bash
kubectl -n kube-system get pods -l app.kubernetes.io/name=kube-vip-ds -o wide
nc -vz 192.168.50.230 6443
```

5
talos/clusterconfig/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
noble-noble-cp-1.yaml
noble-noble-cp-2.yaml
noble-noble-cp-3.yaml
noble-noble-worker-1.yaml
talosconfig

29
talos/kubeconfig Normal file
View File

@@ -0,0 +1,29 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpRENDQVMrZ0F3SUJBZ0lRQjJqRHdiclpVQXVqU0NpMjVkcEkxVEFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTJNRE15TnpBMk5UWTBORm9YRFRNMk1ETXlOREEyTlRZMApORm93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCTytlK3dhN0V4SW8yN2w4a01yR0ROOTNMbFVtMytGT201Y3FmRkZ2RXdOYTgrT1loM1NPQzFCTWY0S1QKNnVrNTMwZlA1T0VrbFpOTTBCV3N4VkpOQzhxallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVV2N0hpTE5PSUgwOXBTaTFpNDVUR2FvVXpSZTB3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnTmVkdUdsK3AKMzRQdmdGbUJMdmZIWlBzV1hqNmVQa2p0OE8yS0pHUUIvdDRDSUcyNTVIZnYzT09QR0tnYTNMby81L083cjh1bwpyMGhyNDNJR0ltME1FUkZECi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://192.168.50.230:6443
name: noble
contexts:
- context:
cluster: noble
namespace: default
user: admin@noble
name: admin@noble
- context:
cluster: noble
namespace: default
user: admin@noble-1
name: admin@noble-1
current-context: admin@noble
kind: Config
preferences: {}
users:
- name: admin@noble
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnekNDQVNxZ0F3SUJBZ0lRVzZVMVVxdk84SnJZUkpzYWJFbVJQVEFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTJNRE15TnpBM01UVTBNbG9YRFRJM01ETXlOekEzTVRVMQpNbG93S1RFWE1CVUdBMVVFQ2hNT2MzbHpkR1Z0T20xaGMzUmxjbk14RGpBTUJnTlZCQU1UQldGa2JXbHVNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFYWxMVUdzcXpkdGk5ekNSb1FMc0F3dlA0RjExaC95TzgKQy9neVNZVXRpMW9aLzd4VUdnUElVaUpKWHpLc3VocDhZSU4xYVpEWXZLNSsyN1BLbm1WM3g2TklNRVl3RGdZRApWUjBQQVFIL0JBUURBZ1dnTUJNR0ExVWRKUVFNTUFvR0NDc0dBUVVGQndNQ01COEdBMVVkSXdRWU1CYUFGTCt4CjRpelRpQjlQYVVvdFl1T1V4bXFGTTBYdE1Bb0dDQ3FHU000OUJBTUNBMGNBTUVRQ0lBSUVXbW1UbHlja1piQ0kKNnBDaDRzOWd2aVJCZ2k4NkFwSTFRMVFXTGIzRUFpQjVJMEZXK0V0d2o4bU5NUHVaRk1UUWlIM1o1ZTA4dlNNSQpkajBBdWpRZzZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUlRU3ZNMFp1a2NVR0RwdndVYTJjRnFoMjFGYWdQRy9kN2Z6bjRDWlZMVzZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFYWxMVUdzcXpkdGk5ekNSb1FMc0F3dlA0RjExaC95TzhDL2d5U1lVdGkxb1ovN3hVR2dQSQpVaUpKWHpLc3VocDhZSU4xYVpEWXZLNSsyN1BLbm1WM3h3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
- name: admin@noble-1
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJoRENDQVNxZ0F3SUJBZ0lRUStjVHg3OWxZdlFkYUROTDZLZTlqVEFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTJNRE15TnpBM01UVXpNVm9YRFRJM01ETXlOekEzTVRVMApNVm93S1RFWE1CVUdBMVVFQ2hNT2MzbHpkR1Z0T20xaGMzUmxjbk14RGpBTUJnTlZCQU1UQldGa2JXbHVNRmt3CkV3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFVzBtcGlCbHA0OEQ3SFU5eVFIS2MwblhCOTJxYzNoNFoKT2pya0xGRksxRnBsOE5xVFdEV2x3NmpsWUFlRWdzL0E1NzB3QzFrazRoZGdiZGJGZ2hZcmJxTklNRVl3RGdZRApWUjBQQVFIL0JBUURBZ1dnTUJNR0ExVWRKUVFNTUFvR0NDc0dBUVVGQndNQ01COEdBMVVkSXdRWU1CYUFGTCt4CjRpelRpQjlQYVVvdFl1T1V4bXFGTTBYdE1Bb0dDQ3FHU000OUJBTUNBMGdBTUVVQ0lRRGdQaDdJUjV0RjhmL3UKRks1N2RpZVplOHoyeEVPWUxYYnZid0pIQXZIMWp3SWdCaW5qOEJHZXVRejZ6QUFjU2Z6aWRTYWlWMlpvZElDUApWZlcrckE3ZzV6MD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUhUQXZZc2htcHF6VG1XdUhEK1NLcFVTVlppdllmckF5RUY4cGVIK1JiS3FvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVzBtcGlCbHA0OEQ3SFU5eVFIS2MwblhCOTJxYzNoNFpPanJrTEZGSzFGcGw4TnFUV0RXbAp3NmpsWUFlRWdzL0E1NzB3QzFrazRoZGdiZGJGZ2hZcmJnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=

36
talos/talconfig.yaml Normal file
View File

@@ -0,0 +1,36 @@
# talhelper cluster definition: 3 hybrid control-plane nodes + 1 worker,
# API endpoint is the kube-vip VIP.
clusterName: noble
endpoint: https://192.168.50.230:6443
talosVersion: v1.8.4
kubernetesVersion: v1.31.1
allowSchedulingOnControlPlanes: true

# Use Cilium installed via GitOps (no bundled Talos CNI).
cniConfig:
  name: none

clusterPodNets:
  - 10.244.0.0/16
clusterSvcNets:
  - 10.96.0.0/12

nodes:
  - hostname: noble-cp-1
    ipAddress: 192.168.50.20
    controlPlane: true
    installDisk: /dev/sda
  - hostname: noble-cp-2
    ipAddress: 192.168.50.30
    controlPlane: true
    installDisk: /dev/sda
  - hostname: noble-cp-3
    ipAddress: 192.168.50.40
    controlPlane: true
    installDisk: /dev/sda
  - hostname: noble-worker-1
    ipAddress: 192.168.50.10
    controlPlane: false
    installDisk: /dev/sda

23
talos/talsecret.sops.yaml Normal file
View File

@@ -0,0 +1,23 @@
cluster:
id: FhV8A1tRzXT32GXHpVevBU7p7HJUb3nCZajSpseHGe4=
secret: vhljddQzn/bTLvVpLG2/GysSF36jWowbZn10cc6aLVA=
secrets:
bootstraptoken: tr094l.h13snimxwge8clts
secretboxencryptionsecret: mBXrzcwJFcRIKPaoL+2v41eh1F6CJ5xRm437BvAv59M=
trustdinfo:
token: 2u1g6w.04nz6h435zz8eo1u
certs:
etcd:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmVENDQVNPZ0F3SUJBZ0lRVTd5ZXhlOHNYc1BDa1V3eHFuenpsVEFLQmdncWhrak9QUVFEQWpBUE1RMHcKQ3dZRFZRUUtFd1JsZEdOa01CNFhEVEkyTURNeU56QTJOVFkwTkZvWERUTTJNRE15TkRBMk5UWTBORm93RHpFTgpNQXNHQTFVRUNoTUVaWFJqWkRCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQk9jVUZVWWJFQjF1Ckt4clQ3bFhYakpSSmc5bng5Qk52ZTFJV0wxczBsVjVvbWN6SVo1d3FxRjdJOHJaNzIrUUpDS2R0QXh5OUhSeDcKWU9XcG1vcXduMWFqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjRApBUVlJS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVd1JRUW1YVy9qV1JDCkxPL2lNV2FlaUNKd3pKRXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWdBeXVXTXZ0N1pyNWs1aDRJbkxlUWIwQkIKb0hZM0xpMEpiOUhTcmdPK2hZa0NJUUNYT2VwSzJ6Z20rbUdBRFFkdXNlaklmR0Z3SHNSRVRRU3Z0R3R4RE5FaQpQdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUtGbGxleS9LNGMvanNXMGQ4R1h0UnFudTFDNVNzVmF3RUd1ZkNHN1d3OG5vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNXhRVlJoc1FIVzRyR3RQdVZkZU1sRW1EMmZIMEUyOTdVaFl2V3pTVlhtaVp6TWhubkNxbwpYc2p5dG52YjVBa0lwMjBESEwwZEhIdGc1YW1haXJDZlZnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
k8s:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpRENDQVMrZ0F3SUJBZ0lRQjJqRHdiclpVQXVqU0NpMjVkcEkxVEFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTJNRE15TnpBMk5UWTBORm9YRFRNMk1ETXlOREEyTlRZMApORm93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCTytlK3dhN0V4SW8yN2w4a01yR0ROOTNMbFVtMytGT201Y3FmRkZ2RXdOYTgrT1loM1NPQzFCTWY0S1QKNnVrNTMwZlA1T0VrbFpOTTBCV3N4VkpOQzhxallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVV2N0hpTE5PSUgwOXBTaTFpNDVUR2FvVXpSZTB3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnTmVkdUdsK3AKMzRQdmdGbUJMdmZIWlBzV1hqNmVQa2p0OE8yS0pHUUIvdDRDSUcyNTVIZnYzT09QR0tnYTNMby81L083cjh1bwpyMGhyNDNJR0ltME1FUkZECi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUlKMlNxb0pLQnhLTitOMGpWMW9UYnhsK1pSanlvUDJjNElwYW01OWJJZ25vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNzU3N0Jyc1RFaWpidVh5UXlzWU0zM2N1VlNiZjRVNmJseXA4VVc4VEExcno0NWlIZEk0TApVRXgvZ3BQcTZUbmZSOC9rNFNTVmswelFGYXpGVWswTHlnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
k8saggregator:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJYekNDQVFhZ0F3SUJBZ0lSQVBuMUdTa1BNUHFISkJ3b0lCM3JzWEF3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5qQXpNamN3TmpVMk5EUmFGdzB6TmpBek1qUXdOalUyTkRSYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVRCZEp3SEtYMXM2KzcxQ0c3SHFzSWFpQnRBYkd0TlBEVkJoMEt1ZG1OaXpnYjlpT3JVCjFDb2JUaG9WbXB1R2tYUGVzb0gyUXRXK21UVUtKSnk5YWQxMm8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGRW5lb1JtQ1B3MmVRNGVMSkJkMCtreUFJQ1ZsTUFvR0NDcUdTTTQ5QkFNQ0EwY0FNRVFDCklEY2FMblZ4YWRkYXhqcWcxbEFaMDBDQ201Qm1CZzhiU0ZpTXN0ZzFWNk0vQWlBTkJWeWZoU1JLM3ZSTzV1MTYKTHNydmpuaXd3ZGNlOFo2eVBYZUVZcWs4VXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU90WGtCUlNUY3ptQzZzSDJES2Z4WmVPb0JIR2xzVlF6M0VHYWhDdnZVUWhvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFd1hTY0J5bDliT3Z1OVFodXg2ckNHb2diUUd4clRUdzFRWWRDcm5aallzNEcvWWpxMU5RcQpHMDRhRlpxYmhwRnozcktCOWtMVnZwazFDaVNjdlduZGRnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
k8sserviceaccount:
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBdFdadlNyVmlQd1RBcEo2K0FBOVpwOEVwbHV3SzNqZmYzaXlzMWxna0Q4K0gwWE1SCm1pbTRlQVpBR05SSS8xVERvMVIxY2lTdlA3MFgwTk1SeEVqY21jS1NQYWU2TnhyU3ptbXF2ajM5akpRQ2F2VkoKY3EvcTEreUJ0anVNYkJFTDNHSFlBU1FwUXVXOVRZTDAzU0h1YkpSYXUrcWlCU0YrWDJFZlo3ZGtKTFVXbC9xWgpiZlMzWVZTWFJVbFZxYjJSQ2pzTHZ0V2RXMjBZK1FEZ1dERms1WlhKWnloRzNNeEp6NkdQU1pxZ0NJYkRUNkluCkNZSFRZaGVpQUFGcG5xKzJwNUxaRnRoMU04YzdDRnU0dVY4ZWJ6OG40aTdCUVpYWnFCSExqU2U5Y0xsV05MWTkKZGtpdHNXZk1qbjhnMXgwUWpqSEoxeFJzMmM1ekVpWlVOYnZhZkNDV0FQbXg5MDFsVmNvS3RvSEpQL2VoYUVPaApqMUdld1V0K2hIbUlvSm5lbnQ0RVRGTmVvSEdhZC9NS1dUNWNjWEw2SUUyc2FSUjM2OVcvRU16cTZyWURoUDNyCm5oT0RBdldnVHVGUE9ucjR5RUJ0dHJCVzNCVTZZd3Jhb3FvYzhhVGlGMWJnemNuQmVKc0piRlpVN0hjVmxpc2MKNnFCWWNncHFoUUhUelV6OXduZ3hLNHV4QXlCUnlZRm15akNoUVZGSjd4UWZlYVRWRVovTnc5c1hZL2xSZDh5dQppMU56emxnMGVlWHRiQ01KY2FkT1dPZEZZaDd0QkZXYjZvZC9GbjV6WVBFY0JLdDFIb0laT1JJMzdDN2RrT042Cm1uZjJscEZVNk9idU5LN050azJwak1pRmMzVHVDZ3d4R281WSszb01TSFBzTWhFakNabHQwZ1MvbGJzQ0F3RUEKQVFLQ0FnQWlua0RnOWdxZzBpeGRmam51VXYrZUp4dmp4SG95ZkdGSnlpYlY1UTVFd2lzK1NvSnlkRUhURGdaUApkcnZUbG42YmZmUEg2NzVTSUtrWjNoNEc0b3pPL3pYZmRGSHlVRGtvMFR1WGdNY1JlL0dXTGVkdGJxc1h0L2Z0CktpSWJRWW1NN2xORnJIdi9XMDZzS3pERnZzTDhqN2RkSTJMMkxiVXJTS0t3cld2OElWOEZjL3F4NUVEVzMwamIKSFNxdThSRnI3V2JKYllUUlBObkdNMmVkRFJnZlJGMmlSU3A0MnJlL1d5cTROajBTUTMya2hlS0RTdlpuUXZGVQpwUEJlSzFSbFdIMzdnU1drMHdHdUQ2c0tIVi8yaFF2OGUwWEFXWE9uUW5ZaEl4TmhIczJYMDZ1WkZqZW5vcEtFCkl6akdOTExESURkUHg0TWFjZTY5NlBpckpJV0dWbHNTTDRESWJXekZNdE9lbmE5bFFncDNtZXpEWWN6dk9uT2gKZjF6K05EYi9jKzY0dzh5UGQxSHRJY3ZQaEgxc205RmtRU2lGVFJFK0lvVk1ONDVrT0gwb0NGZ2YwdlN3SEF6Lwo4dnlaUTh2UVpkdEZFNlluOGE1R0xUZ3p4aHBXdzJxV0E3ZEpLVGwxbWo2RDZ5d0tjRjVnaHAvSUgyRXp0VzFuCnJpRzUrVGFsaDk1SWVaSDNTYzRQZHZXTUd5ZnNEaHRGejgwWWpEQjR0ZG1PTkJRcU1kNmdwMFkrVWxySnR2RTEKYk1tU3VPTkJNRngvQmwwSncrelQ0YUpndHNzZzFwSmJpeXU1RlJRd3VQRkF0dWgyQWdCTld4blR4STlVMG9QdApITGREMjA1bktPYXBVRDFkeTZQbUdKRUFWVlhHMXEwUi91K0ZOL3AzejhkeEY5amU0UUtDQVFFQXlrTS9FbGtkCkZmZkNmbzV1amYwZUJZai9WSjlGaVFoK2xYK1FIVFEzM0h
LZG5FTmFSYXFiODlsUEJDdmJ5L2tKZ2VpaTRyTysKRDgvb1cvRndpQk1sY3Y1Y1k0Nmo4ZEFvbjFFSm00U0k2VWRhTXl6cjM3SFh6L2o3aWJOWlAyaXNET3o5UTNYegpIUDVabEJZY0U2U0hTWDFOOXFwd21OR1l5dEY4aC9jdHkzNzlnREZrSXZsM00rM2xYMWhkc0ZaSVZuTmlVWFRTCmRIUFBBZjJ3clJ2bHdkbDJ1M0JZNGtZaVlKRXRsR0Y1bnl3eDZXR3B1bUtoaEM3SXF4OGU2TERiem01UkV3N0QKUElMZDlpbERweFpuZzRPSCtIQ0NISzU1bS9WQWFpRm12RFRMSTFHUWVLQVpMZ25QWUZiWFNqODlnK3JZR0p4VQpFL1lMSTR2YWFLY05FUUtDQVFFQTVaZzhyc1hPejdCK3BuNElSTHJSUC9GYy9oRDZkTmY4Qm5DdHc4bkRuYkVJCkdFS3BMTVNNK2xhbklKYWVXcVZmeFErYTlwSkdaejM3ZjNiVENLeGh2d2dEZG5xSHV6a0FtU01ldGM0TG1pVWQKTlRHcTJFWkw3eWtNdDJobHQvTThYZ1k1cHhJL21FS3ZSbWFIRDZsc1pTWjFhM3NVSWp0RzUrM0FEQjhreHhpVApRcmFPVTBjRGNhSWNKNjlZbko3bzVCZEFxQ2xvV0VDUzh1cGdPdzNqZm9vR3QvdFZwLzJyN3BKbmNIN0JVZHd1CjNQdFU4L0RrdTNDUEZoWFUzbDBXTXNUUEV0cnJGS2RzV0xtK21iYTNFdXFKRURvZDlQK1YxQTVZSXFZYmMwY1kKcVNaaXpOSnJKSkp4T3h3aDc3VGVtWGl1dVdVeE5DNWVveGV0NVltbUN3S0NBUUVBdldGM2hjT0FzMWYzZVMzOQpuOTczSkRHZytPZmtZS2xlZExZckJ0MGt6TGxZajc2VW9KUmRUMVlTWVJKN3k2RlRZSnFsSU9VeE9YYnUxbC9iCmdOMkVmQVprRlNleW83REd5RjFGUktNMDJrL2Z5Zmp1cGRYTC8wUGVxWkVQS0lybVJYZ0Vyd3lhWkhSWEJZd3EKSDg0MmlmM1VhUGd2VXpjMCsvcG53cHNTK3UzZGlCRTI5SFJtUTI0bERVQWRBUVhZMTNGVUJuYitzdURZVzhIZwpra1dEdkJ6VXlpNG9XejFWNU5zcU5UdUxlQmtXWnJIMkRMbGJCL3dTRWYraW5qY3lxRGVzbTg1L3lZR3pPRkJzCnN0OE1ieHhSekxIemNjMS91aUpKZk5YbmJxTno3STdyV2JaMXZTQ2NWbFVaWWNDUzVaaVhXM3ZNVWFCWHo2R2MKRDg4U01RS0NBUUVBeHR0YnU5aXlMcXJrbDFuVDJZdWhqMnVES3I4VDNyM3ZtTGhobUpHWnIyeFU0WVpqTnRZcQpjTzA2cGZ3dXZiNDh1OWF2Vmw4TlFZQ3E0eFRNNWRkQWRnLy94OCtLM2pzWjdJbEJvU0FNWm44ODFBVG52NWpyClRnTFU4OG9sUi9VUjFUSTVIeDZzSERtdHpDRWpYQXBYU3lqTFRNTjJoY3VudDF2eUdjMmpzaG56K2pWYUFvRWcKVjN5Y1BEY2dYYzg1VWMxZUFBaVZTdExyTkNDU0pyUDUxWERCTHZzdWpta2xVR1pYMTFUQ0poKzZLMFk0cDJ4LwpBR1lXV0graU50S1RWbmVtRHVPejl0aW4vQlV0STcvZ3d5NkdkcHFQdGRMbE41MFE4em5CenMvR2FVTkpFYlF5CmZxT2tGUmxodjFkOThabFlaRlZrRDVrVitOYWFsSlByVHdLQ0FRRUFnbjVlbkRtZy9tYWQ3cTFDRUs1aklKck0KRDBWQTE4bjFYcnlpU1owS2lacWFMYzhaRTVIVXZtSkpCNUN3aU55VHgwWWtmUWdTM2tEemlaRzdqbjdSVU1aWApaV3A0RHpVdk0wdHpMRGx1bHNreUM1czBtT3ZIWXZ2QUs4YXNKTm1
0VXJ1TE16cmVLU3h6K0RIeVBDdEtlQTF2CjJJV3ppWHVsNXZiRSt4SU1qVkVZYVNxREVvbmFBQUNMTEYrc0lyZ1lMM25QcmJyT3pWd0NUNHlKMmxqNURlcmcKbUtqbGYzak9weVd4ejdJODB6dHlRQk9ma1B2b0U1dTRDb2hxK05kYmo5Nmdoa1g3ZG82NmJScFY4cWVmbERNUQowL240YkY4Ri9Pcm9LZ1JQMzY0ZitRNGFQSUxQMXhtQXRIS0VVdFlRR09HQkhuc1g3MitnZDhGZFRtaEFSUT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
os:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBOFJUSlQ3U0NHTVVYbS93ZFFHM3lUakFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qWXdNekkzTURZMU5qUTBXaGNOTXpZd016STBNRFkxTmpRMFdqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQUJxRERGdUN4bnhVVFBMNHoyWk83S291YVpXZkE3cE5zK0UrCml0dk9IVHdxbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk41NlR5UTlpSENKckI5dApYSzZnM3YvczdEcFVNQVVHQXl0bGNBTkJBQkRyWHYySFkrQjBvUk1kQXJUSnljWmdENnBqSEQyMEhpb0tIdWZ3Ck5vZ292S1c3QXFZV1pvdGEwNzlMb2ZsclZOT1gvdTFUbmx6ajAyenZnV1hqM2drPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJREpyYmNZTmx5dk1JYXBvb1VMY0xaTEZ4QXNSQVQweWFhRS8zMlI1UFNXdwotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K