Compare commits

...

30 Commits

Author SHA1 Message Date
Nikholas Pcenicni
9daff01b0b Add OIDC environment variables to Termix service in Docker Compose 2026-02-17 02:03:58 -05:00
Nikholas Pcenicni
bbea917ef7 Add .env.sample for OIDC configuration 2026-02-17 02:02:15 -05:00
f1ad4bb441 Update komodo/automate/termix/compose.yaml 2026-02-17 06:40:42 +00:00
28b586eea6 Update komodo/automate/termix/compose.yaml 2026-02-17 06:32:50 +00:00
4452bafdbe Add komodo/automate/termix/compose.yaml 2026-02-17 06:30:58 +00:00
Nikholas Pcenicni
af6fd2104c Add init flag to Seerr service in Docker Compose configuration 2026-02-16 19:25:33 -05:00
030d2bdae5 Update komodo/arr/arrs/compose.yaml 2026-02-17 00:10:19 +00:00
Nikholas Pcenicni
323ccd5a65 Add Docker Compose configuration and environment sample for SparkyFitness 2026-02-15 22:02:53 -05:00
Nikholas Pcenicni
2eb458a169 Add Docker Compose configuration for Fleet service with MySQL and Redis 2026-02-13 00:32:19 -05:00
Nikholas Pcenicni
797aa2e514 Add Docker Compose configuration for Watchstate service 2026-02-12 12:31:27 -05:00
Nikholas Pcenicni
1d40a0a7ec Changed volume mount on jellyfin deprecated 2026-02-11 17:39:53 -05:00
Nikholas Pcenicni
0fce675f67 Add Docker Compose configuration and environment sample for AdventureLog 2026-02-06 15:39:04 -05:00
Nikholas Pcenicni
b16f83a59d Add Jellyfin + macOS: Persistent NFS Mount documentation 2026-02-01 23:37:38 -05:00
323b59835e Add coder/proxmox-vm/terraform.tfvars 2026-01-31 05:25:46 +00:00
8146c64a7f Add coder/proxmox-vm/cloud-init/user-data.tftpl 2026-01-31 05:24:55 +00:00
a0df894a3d Add coder/proxmox-vm/Readme.md 2026-01-31 05:24:09 +00:00
f0ee61ebe2 Add coder/proxmox-vm/main.tf 2026-01-31 05:23:21 +00:00
701d92b48a Update komodo/automate/coder/compose.yaml 2026-01-31 04:39:23 +00:00
fe72dad0e9 Update komodo/automate/coder/compose.yaml 2026-01-31 04:27:21 +00:00
7db7777f2b Update komodo/automate/coder/compose.yaml 2026-01-31 04:19:31 +00:00
125bac0f5d Update komodo/automate/coder/.env.sample 2026-01-31 04:19:02 +00:00
63dae839ce Update komodo/automate/coder/compose.yaml 2026-01-31 04:16:10 +00:00
c1eb0bc7ae Update komodo/automate/coder/compose.yaml
Added group_add for docker sock permission
2026-01-31 04:06:54 +00:00
d493a1eefd Add komodo/automate/coder/.env.sample 2026-01-31 03:57:39 +00:00
65b171e55f Add komodo/automate/coder/compose.yaml 2026-01-31 03:55:17 +00:00
3b1ed02aa2 Update komodo/auth/Authentik/compose.yaml 2026-01-31 03:06:05 +00:00
b712b36dc8 Update komodo/auth/Authentik/compose.yaml 2026-01-31 02:34:38 +00:00
dfd811e839 Add komodo/auth/Authentik/.env.sample 2026-01-31 02:17:51 +00:00
a1c42305cd Add komodo/auth/Authentik/compose.yaml 2026-01-31 02:15:26 +00:00
Nikholas Pcenicni
fcb2119859 feat: Introduce an Ansible common role for base system configuration, including packages, users, and Netplan networking, alongside Semaphore playbooks for system bootstrapping and Proxmox management. 2026-01-19 03:47:55 -05:00
33 changed files with 2080 additions and 13 deletions

View File

@@ -0,0 +1,26 @@
---
- name: Register Target Host
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Verify target_host is defined
fail:
msg: "The 'target_host' variable must be defined (e.g. 192.168.1.10)"
when: target_host is not defined
- name: Add target host to inventory
add_host:
name: target_node
ansible_host: "{{ target_host }}"
ansible_user: "{{ target_user | default('root') }}"
ansible_ssh_pass: "{{ target_password | default(omit) }}"
ansible_ssh_private_key_file: "{{ target_private_key_file | default(omit) }}"
ansible_python_interpreter: /usr/bin/python3
- name: Bootstrap Node
hosts: target_node
become: yes
gather_facts: yes
roles:
- common
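
Semaphore supplies `target_host` and the related connection variables as extra vars; the same playbook can also be run by hand with `ansible-playbook`. A minimal sketch, assuming the playbook is saved as `bootstrap.yml` (the filename is not shown in this compare):

```bash
# Hypothetical filename; pass the same extra vars Semaphore would supply.
ansible-playbook bootstrap.yml \
  -e target_host=192.168.1.10 \
  -e target_user=root \
  -e target_private_key_file="$HOME/.ssh/id_ed25519"
```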

View File

@@ -0,0 +1,29 @@
---
- name: Register Target Host
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Verify target_host is defined
fail:
msg: "The 'target_host' variable must be defined (e.g. 192.168.1.10)"
when: target_host is not defined
- name: Add target host to inventory
add_host:
name: target_node
ansible_host: "{{ target_host }}"
ansible_user: "{{ target_user | default('root') }}"
ansible_ssh_pass: "{{ target_password | default(omit) }}"
ansible_ssh_private_key_file: "{{ target_private_key_file | default(omit) }}"
ansible_python_interpreter: /usr/bin/python3
- name: Configure Networking
hosts: target_node
become: yes
gather_facts: yes
tasks:
- name: Run networking task from common role
include_role:
name: common
tasks_from: networking.yml

View File

@@ -0,0 +1,29 @@
---
- name: Register Target Host
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Verify target_host is defined
fail:
msg: "The 'target_host' variable must be defined (e.g. 192.168.1.10)"
when: target_host is not defined
- name: Add target host to inventory
add_host:
name: target_node
ansible_host: "{{ target_host }}"
ansible_user: "{{ target_user | default('root') }}"
ansible_ssh_pass: "{{ target_password | default(omit) }}"
ansible_ssh_private_key_file: "{{ target_private_key_file | default(omit) }}"
ansible_python_interpreter: /usr/bin/python3
- name: Configure Users
hosts: target_node
become: yes
gather_facts: yes
tasks:
- name: Run users task from common role
include_role:
name: common
tasks_from: users.yml

View File

@@ -0,0 +1,34 @@
---
- name: Register Proxmox Host
hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Verify proxmox_host is defined
fail:
msg: "The 'proxmox_host' variable must be defined."
when: proxmox_host is not defined
- name: Verify proxmox_action is defined
fail:
msg: "The 'proxmox_action' variable must be defined (e.g. create_vm, create_template, delete_vm)."
when: proxmox_action is not defined
- name: Add Proxmox host to inventory
add_host:
name: proxmox_node
ansible_host: "{{ proxmox_host }}"
ansible_user: "{{ proxmox_user | default('root') }}"
ansible_ssh_pass: "{{ proxmox_password | default(omit) }}"
ansible_ssh_private_key_file: "{{ proxmox_private_key_file | default(omit) }}"
ansible_python_interpreter: /usr/bin/python3
- name: Execute Proxmox Action
hosts: proxmox_node
become: yes
gather_facts: yes
vars:
# Explicitly map the action variable if needed, though role should pick it up from host vars or extra vars
proxmox_action: "{{ proxmox_action }}"
roles:
- proxmox_vm
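
This playbook is parameterized the same way, with `proxmox_action` selecting what the `proxmox_vm` role does (the role's task layout is not part of this compare). A hedged example invocation, assuming the file is saved as `proxmox.yml`:

```bash
ansible-playbook proxmox.yml \
  -e proxmox_host=pve.example.lan \
  -e proxmox_action=create_vm \
  -e proxmox_private_key_file="$HOME/.ssh/id_ed25519"
```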

View File

@@ -0,0 +1,30 @@
---
# Common packages to install
common_packages:
- curl
- wget
- git
- vim
- htop
- net-tools
- unzip
- dnsutils
- software-properties-common
- ca-certificates
- gnupg
- openssh-server
# SSH Configuration
common_ssh_users:
- name: "{{ ansible_user_id }}"
keys: []
# Add your keys in inventory or group_vars override
# Networking
common_configure_static_ip: false
common_interface_name: "eth0"
# common_ip_address: "192.168.1.100/24"
# common_gateway: "192.168.1.1"
common_dns_servers:
- "1.1.1.1"
- "8.8.8.8"

View File

@@ -0,0 +1,6 @@
---
- name: Apply Netplan
shell: netplan apply
async: 45
poll: 0
ignore_errors: yes

View File

@@ -0,0 +1,10 @@
---
- name: Install common packages
import_tasks: packages.yml
- name: Configure users and SSH keys
import_tasks: users.yml
- name: Configure networking
import_tasks: networking.yml
when: common_configure_static_ip | bool

View File

@@ -0,0 +1,23 @@
---
- name: Verify required variables for static IP
fail:
msg: "common_ip_address and common_interface_name must be defined when common_configure_static_ip is true."
when:
- common_configure_static_ip | bool
- (common_ip_address is not defined or common_ip_address | length == 0 or common_interface_name is not defined)
- name: Install netplan.io
apt:
name: netplan.io
state: present
when: ansible_os_family == "Debian"
- name: Configure Netplan
template:
src: netplan_config.yaml.j2
dest: /etc/netplan/01-netcfg.yaml
owner: root
group: root
mode: '0644'
notify: Apply Netplan
when: common_configure_static_ip | bool

View File

@@ -0,0 +1,12 @@
---
- name: Update apt cache
apt:
update_cache: yes
cache_valid_time: 3600
when: ansible_os_family == "Debian"
- name: Install common packages
apt:
name: "{{ common_packages }}"
state: present
when: ansible_os_family == "Debian"

View File

@@ -0,0 +1,18 @@
---
- name: Ensure users exist
user:
name: "{{ item.name }}"
shell: /bin/bash
groups: sudo
append: yes
state: present
loop: "{{ common_ssh_users }}"
when: item.create_user | default(false)
- name: Add SSH keys
authorized_key:
user: "{{ item.0.name }}"
key: "{{ item.1 }}"
loop: "{{ common_ssh_users | subelements('keys', skip_missing=True) }}"
loop_control:
label: "{{ item.0.name }}"
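
The `subelements` loop expects each `common_ssh_users` entry to provide a `name`, an optional `create_user` flag, and a `keys` list. A sketch of passing that structure as JSON extra vars (the user name, key, and `bootstrap.yml` filename are placeholders):

```bash
ansible-playbook bootstrap.yml \
  -e target_host=192.168.1.10 \
  -e '{"common_ssh_users": [
        {"name": "deploy",
         "create_user": true,
         "keys": ["ssh-ed25519 AAAAC3NzaC1... deploy@example"]}
      ]}'
```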

View File

@@ -0,0 +1,15 @@
network:
version: 2
ethernets:
{{ common_interface_name }}:
dhcp4: no
addresses:
- {{ common_ip_address }}
{% if common_gateway %}
gateway4: {{ common_gateway }}
{% endif %}
nameservers:
addresses:
{% for server in common_dns_servers %}
- {{ server }}
{% endfor %}

161
coder/proxmox-vm/Readme.md Normal file
View File

@@ -0,0 +1,161 @@
---
display_name: Proxmox VM
description: Provision VMs on Proxmox VE as Coder workspaces
icon: ../../../../.icons/proxmox.svg
verified: false
tags: [proxmox, vm, cloud-init, qemu]
---
# Proxmox VM Template for Coder
Provision Linux VMs on Proxmox as [Coder workspaces](https://coder.com/docs/workspaces). The template clones a cloud-init base image, injects user data via Snippets, and runs the Coder agent under the workspace owner's Linux user.
## Prerequisites
- Proxmox VE 8/9
- Proxmox API token with access to nodes and storages
- SSH access from Coder provisioner to Proxmox VE
- Storage with "Snippets" content enabled
- Ubuntu cloud-init image/template on Proxmox
- Latest images: https://cloud-images.ubuntu.com/
## Prepare a Proxmox CloudInit Template (once)
Run on the Proxmox node. This uses a RELEASE variable so you always pull a current image.
```bash
# Choose a release (e.g., jammy or noble)
RELEASE=jammy
IMG_URL="https://cloud-images.ubuntu.com/${RELEASE}/current/${RELEASE}-server-cloudimg-amd64.img"
IMG_PATH="/var/lib/vz/template/iso/${RELEASE}-server-cloudimg-amd64.img"
# Download cloud image
wget "$IMG_URL" -O "$IMG_PATH"
# Create base VM (example ID 999), enable QGA, correct boot order
NAME="ubuntu-${RELEASE}-cloudinit"
qm create 999 --name "$NAME" --memory 4096 --cores 2 \
--net0 virtio,bridge=vmbr0 --agent enabled=1
qm set 999 --scsihw virtio-scsi-pci
qm importdisk 999 "$IMG_PATH" local-lvm
qm set 999 --scsi0 local-lvm:vm-999-disk-0
qm set 999 --ide2 local-lvm:cloudinit
qm set 999 --serial0 socket --vga serial0
qm set 999 --boot 'order=scsi0;ide2;net0'
# Enable Snippets on storage 'local' (one-time)
pvesm set local --content snippets,vztmpl,backup,iso
# Convert to template
qm template 999
```
Verify:
```bash
qm config 999 | grep -E 'template:|agent:|boot:|ide2:|scsi0:'
```
### Enable Snippets via GUI
- Datacenter → Storage → select `local` → Edit → Content → check "Snippets" → OK
- Ensure `/var/lib/vz/snippets/` exists on the node for snippet files
- Template page → CloudInit → Snippet Storage: `local` → File: your `.yml` snippet → Apply
## Configure this template
Edit `terraform.tfvars` with your environment:
```hcl
# Proxmox API
proxmox_api_url = "https://<PVE_HOST>:8006/api2/json"
proxmox_api_token_id = "<USER@REALM>!<TOKEN>"
proxmox_api_token_secret = "<SECRET>"
# SSH to the node (for snippet upload)
proxmox_host = "<PVE_HOST>"
proxmox_password = "<NODE_ROOT_OR_SUDO_PASSWORD>"
proxmox_ssh_user = "root"
# Infra defaults
proxmox_node = "pve"
disk_storage = "local-lvm"
snippet_storage = "local"
bridge = "vmbr0"
vlan = 0
clone_template_vmid = 999
```
### Variables (terraform.tfvars)
- These values are standard Terraform variables that the template reads at apply time.
- Place secrets (e.g., `proxmox_api_token_secret`, `proxmox_password`) in `terraform.tfvars` or inject them as environment variables using `TF_VAR_*` (e.g., `TF_VAR_proxmox_api_token_secret`); see the sketch after this list.
- You can also override with `-var`/`-var-file` if you run Terraform directly. With Coder, the repo's `terraform.tfvars` is bundled when pushing the template.
Variables expected:
- `proxmox_api_url`, `proxmox_api_token_id`, `proxmox_api_token_secret` (sensitive)
- `proxmox_host`, `proxmox_password` (sensitive), `proxmox_ssh_user`
- `proxmox_node`, `disk_storage`, `snippet_storage`, `bridge`, `vlan`, `clone_template_vmid`
- Coder parameters: `cpu_cores`, `memory_mb`, `disk_size_gb`
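
A minimal sketch of keeping the two sensitive values out of `terraform.tfvars` by exporting them as `TF_VAR_*` environment variables; the `terraform` commands apply only if you run Terraform directly rather than pushing through Coder (values are placeholders):

```bash
export TF_VAR_proxmox_api_token_secret='<SECRET>'
export TF_VAR_proxmox_password='<NODE_ROOT_OR_SUDO_PASSWORD>'

# Non-secret settings can stay in terraform.tfvars
terraform init
terraform plan -var-file=terraform.tfvars
```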
## Proxmox API Token (GUI/CLI)
Docs: https://pve.proxmox.com/wiki/User_Management#pveum_tokens
GUI:
1. (Optional) Create automation user: Datacenter → Permissions → Users → Add (e.g., `terraform@pve`)
2. Permissions: Datacenter → Permissions → Add → User Permission
- Path: `/` (or narrower covering your nodes/storages)
- Role: `PVEVMAdmin` + `PVEStorageAdmin` (or `PVEAdmin` for simplicity)
3. Token: Datacenter → Permissions → API Tokens → Add → copy Token ID and Secret
4. Test:
```bash
curl -k -H "Authorization: PVEAPIToken=<USER@REALM>!<TOKEN>=<SECRET>" \
https://<PVE_HOST>:8006/api2/json/version
```
CLI:
```bash
pveum user add terraform@pve --comment 'Terraform automation user'
pveum aclmod / -user terraform@pve -role PVEAdmin
pveum user token add terraform@pve terraform --privsep 0
```
## Use
```bash
# From this directory
coder templates push --yes proxmox-cloudinit --directory . | cat
```
Create a workspace from the template in the Coder UI. First boot usually takes 60-120s while cloud-init runs.
## How it works
- Uploads rendered cloud-init user data to `<storage>:snippets/<vm>.yml` via the provider's `proxmox_virtual_environment_file`
- VM config: `virtio-scsi-pci`, boot order `scsi0, ide2, net0`, QGA enabled
- Linux user equals Coder workspace owner (sanitized). To avoid collisions, reserved names (`admin`, `root`, etc.) get a suffix (e.g., `admin1`). User is created with `primary_group: adm`, `groups: [sudo]`, `no_user_group: true`
- systemd service runs as that user:
- `coder-agent.service`
## Troubleshooting quick hits
- iPXE boot loop: ensure template has bootable root disk and boot order `scsi0,ide2,net0`
- QGA not responding: install/enable QGA in template; allow 60-120s on first boot
- Snippet upload errors: storage must include `Snippets`; token needs Datastore permissions; path format `<storage>:snippets/<file>` handled by provider
- Permissions errors: ensure the token's role covers the target node(s) and storages
- Verify snippet/QGA: `qm config <vmid> | egrep 'cicustom|ide2|ciuser'`
## References
- Ubuntu Cloud Images (latest): https://cloud-images.ubuntu.com/
- Proxmox qm(1) manual: https://pve.proxmox.com/pve-docs/qm.1.html
- Proxmox CloudInit Support: https://pve.proxmox.com/wiki/Cloud-Init_Support
- Terraform Proxmox provider (bpg): `bpg/proxmox` on the Terraform Registry
- Coder Best practices & templates:
- https://coder.com/docs/tutorials/best-practices/speed-up-templates
- https://coder.com/docs/tutorials/template-from-scratch

View File

@@ -0,0 +1,53 @@
#cloud-config
hostname: ${hostname}
users:
- name: ${linux_user}
groups: [sudo]
shell: /bin/bash
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
package_update: false
package_upgrade: false
packages:
- curl
- ca-certificates
- git
- jq
write_files:
- path: /opt/coder/init.sh
permissions: "0755"
owner: root:root
encoding: b64
content: |
${coder_init_script_b64}
- path: /etc/systemd/system/coder-agent.service
permissions: "0644"
owner: root:root
content: |
[Unit]
Description=Coder Agent
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
User=${linux_user}
WorkingDirectory=/home/${linux_user}
Environment=HOME=/home/${linux_user}
Environment=CODER_AGENT_TOKEN=${coder_token}
ExecStart=/opt/coder/init.sh
OOMScoreAdjust=-1000
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
runcmd:
- systemctl daemon-reload
- systemctl enable --now coder-agent.service
final_message: "Cloud-init complete on ${hostname}"
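
A quick check, run inside a freshly booted workspace VM, that this user data actually executed; these are standard cloud-init and systemd commands, not something specific to this template:

```bash
# Wait for cloud-init to finish and report its overall status
cloud-init status --wait
# Confirm the unit written by write_files is running
systemctl status coder-agent.service --no-pager
# If something failed, inspect cloud-init's output
sudo tail -n 50 /var/log/cloud-init-output.log
```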

283
coder/proxmox-vm/main.tf Normal file
View File

@@ -0,0 +1,283 @@
terraform {
required_providers {
coder = {
source = "coder/coder"
}
proxmox = {
source = "bpg/proxmox"
}
}
}
provider "coder" {}
provider "proxmox" {
endpoint = var.proxmox_api_url
api_token = "${var.proxmox_api_token_id}=${var.proxmox_api_token_secret}"
insecure = true
# SSH is needed for file uploads to Proxmox
ssh {
username = var.proxmox_ssh_user
password = var.proxmox_password
node {
name = var.proxmox_node
address = var.proxmox_host
}
}
}
variable "proxmox_api_url" {
type = string
}
variable "proxmox_api_token_id" {
type = string
sensitive = true
}
variable "proxmox_api_token_secret" {
type = string
sensitive = true
}
variable "proxmox_host" {
description = "Proxmox node IP or DNS for SSH"
type = string
}
variable "proxmox_password" {
description = "Proxmox password (used for SSH)"
type = string
sensitive = true
}
variable "proxmox_ssh_user" {
description = "SSH username on Proxmox node"
type = string
default = "root"
}
variable "proxmox_node" {
description = "Target Proxmox node"
type = string
default = "pve"
}
variable "disk_storage" {
description = "Disk storage (e.g., local-lvm)"
type = string
default = "local-lvm"
}
variable "snippet_storage" {
description = "Storage with Snippets content"
type = string
default = "local"
}
variable "bridge" {
description = "Bridge (e.g., vmbr0)"
type = string
default = "vmbr0"
}
variable "vlan" {
description = "VLAN tag (0 none)"
type = number
default = 0
}
variable "clone_template_vmid" {
description = "VMID of the cloud-init base template to clone"
type = number
}
data "coder_workspace" "me" {}
data "coder_workspace_owner" "me" {}
data "coder_parameter" "cpu_cores" {
name = "cpu_cores"
display_name = "CPU Cores"
type = "number"
default = 2
mutable = true
}
data "coder_parameter" "memory_mb" {
name = "memory_mb"
display_name = "Memory (MB)"
type = "number"
default = 4096
mutable = true
}
data "coder_parameter" "disk_size_gb" {
name = "disk_size_gb"
display_name = "Disk Size (GB)"
type = "number"
default = 20
mutable = true
validation {
min = 10
max = 100
monotonic = "increasing"
}
}
resource "coder_agent" "dev" {
arch = "amd64"
os = "linux"
env = {
GIT_AUTHOR_NAME = data.coder_workspace_owner.me.name
GIT_AUTHOR_EMAIL = data.coder_workspace_owner.me.email
}
startup_script_behavior = "non-blocking"
startup_script = <<-EOT
set -e
# Add any startup scripts here
EOT
metadata {
display_name = "CPU Usage"
key = "cpu_usage"
script = "coder stat cpu"
interval = 10
timeout = 1
order = 1
}
metadata {
display_name = "RAM Usage"
key = "ram_usage"
script = "coder stat mem"
interval = 10
timeout = 1
order = 2
}
metadata {
display_name = "Disk Usage"
key = "disk_usage"
script = "coder stat disk"
interval = 600
timeout = 30
order = 3
}
}
locals {
hostname = lower(data.coder_workspace.me.name)
vm_name = "coder-${lower(data.coder_workspace_owner.me.name)}-${local.hostname}"
snippet_filename = "${local.vm_name}.yml"
base_user = replace(replace(replace(lower(data.coder_workspace_owner.me.name), " ", "-"), "/", "-"), "@", "-") # to avoid special characters in the username
linux_user = contains(["root", "admin", "daemon", "bin", "sys"], local.base_user) ? "${local.base_user}1" : local.base_user # to avoid conflict with system users
rendered_user_data = templatefile("${path.module}/cloud-init/user-data.tftpl", {
coder_token = coder_agent.dev.token
coder_init_script_b64 = base64encode(coder_agent.dev.init_script)
hostname = local.vm_name
linux_user = local.linux_user
})
}
resource "proxmox_virtual_environment_file" "cloud_init_user_data" {
content_type = "snippets"
datastore_id = var.snippet_storage
node_name = var.proxmox_node
source_raw {
data = local.rendered_user_data
file_name = local.snippet_filename
}
}
resource "proxmox_virtual_environment_vm" "workspace" {
name = local.vm_name
node_name = var.proxmox_node
clone {
node_name = var.proxmox_node
vm_id = var.clone_template_vmid
full = false
retries = 5
}
agent {
enabled = true
}
on_boot = true
started = true
startup {
order = 1
}
scsi_hardware = "virtio-scsi-pci"
boot_order = ["scsi0", "ide2"]
memory {
dedicated = data.coder_parameter.memory_mb.value
}
cpu {
cores = data.coder_parameter.cpu_cores.value
sockets = 1
type = "host"
}
network_device {
bridge = var.bridge
model = "virtio"
vlan_id = var.vlan == 0 ? null : var.vlan
}
vga {
type = "serial0"
}
serial_device {
device = "socket"
}
disk {
interface = "scsi0"
datastore_id = var.disk_storage
size = data.coder_parameter.disk_size_gb.value
}
initialization {
type = "nocloud"
datastore_id = var.disk_storage
user_data_file_id = proxmox_virtual_environment_file.cloud_init_user_data.id
ip_config {
ipv4 {
address = "dhcp"
}
}
}
tags = ["coder", "workspace", local.vm_name]
depends_on = [proxmox_virtual_environment_file.cloud_init_user_data]
}
module "code-server" {
count = data.coder_workspace.me.start_count
source = "registry.coder.com/coder/code-server/coder"
version = "1.3.1"
agent_id = coder_agent.dev.id
}
module "cursor" {
count = data.coder_workspace.me.start_count
source = "registry.coder.com/coder/cursor/coder"
version = "1.3.0"
agent_id = coder_agent.dev.id
}

View File

@@ -0,0 +1,17 @@
# Proxmox API
proxmox_api_url = "https://<PVE_HOST>:8006/api2/json"
proxmox_api_token_id = "<USER@REALM>!<TOKEN>"
proxmox_api_token_secret = "<SECRET>"
# SSH to the node (for snippet upload)
proxmox_host = "<PVE_HOST>"
proxmox_password = "<NODE_ROOT_OR_SUDO_PASSWORD>"
proxmox_ssh_user = "root"
# Infra defaults
proxmox_node = "pve"
disk_storage = "local-lvm"
snippet_storage = "local"
bridge = "vmbr0"
vlan = 0
clone_template_vmid = 999

View File

@@ -110,9 +110,10 @@ services:
       - 6767:6767
     restart: unless-stopped
-  jellyseerr:
-    image: fallenbagel/jellyseerr:latest
-    container_name: jellyseerr
+  seerr:
+    image: ghcr.io/seerr-team/seerr:latest
+    init: true
+    container_name: seerr
     environment:
       - LOG_LEVEL=debug
       - TZ=Canada/Eastern

View File

@@ -0,0 +1,17 @@
PUID=1000
PGID=100
AUTHENTIK_SECRET_KEY=supersecretkey
PG_PASS=supersecretpassword
AUTHENTIK_EMAIL__HOST=smtp.gmail.com
AUTHENTIK_EMAIL__PORT=587
AUTHENTIK_EMAIL__USERNAME=admin@example.com
AUTHENTIK_EMAIL__PASSWORD=password123
AUTHENTIK_EMAIL__USE_TLS=true
AUTHENTIK_EMAIL__USE_SSL=false
AUTHENTIK_EMAIL__TIMEOUT=10
AUTHENTIK_EMAIL__FROM=auth@example.com
COMPOSE_PORT_HTTP=10000
COMPOSE_PORT_HTTPS=10443
AUTHENTIK_ERROR_REPORTING__ENABLED=true
AUTHENTIK_TAG=2025.10
CONFIG_PATH=/srv/dev-disk-by-uuid-7acaa21a-aa26-4605-bb36-8f4c9c1a7695/configs/authentik
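
The secret values above are placeholders and should be replaced before the stack is first started. One way to generate replacements (the lengths here are a choice, not a requirement from this file):

```bash
echo "AUTHENTIK_SECRET_KEY=$(openssl rand -base64 48 | tr -d '\n')"
echo "PG_PASS=$(openssl rand -base64 36 | tr -d '\n')"
```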

View File

@@ -0,0 +1,120 @@
---
services:
postgresql:
image: docker.io/library/postgres:16-alpine
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
volumes:
- database:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
POSTGRES_DB: ${PG_DB:-authentik}
redis:
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis:/data
server:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2024.12.3}
restart: unless-stopped
command: server
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
AUTHENTIK_EMAIL__HOST: ${AUTHENTIK_EMAIL__HOST}
AUTHENTIK_EMAIL__PORT: ${AUTHENTIK_EMAIL__PORT}
AUTHENTIK_EMAIL__USERNAME: ${AUTHENTIK_EMAIL__USERNAME}
AUTHENTIK_EMAIL__PASSWORD: ${AUTHENTIK_EMAIL__PASSWORD}
AUTHENTIK_EMAIL__USE_TLS: ${AUTHENTIK_EMAIL__USE_TLS}
AUTHENTIK_EMAIL__USE_SSL: ${AUTHENTIK_EMAIL__USE_SSL}
AUTHENTIK_EMAIL__TIMEOUT: ${AUTHENTIK_EMAIL__TIMEOUT}
AUTHENTIK_EMAIL__FROM: ${AUTHENTIK_EMAIL__FROM}
volumes:
- media:/data/media
- custom-templates:/templates
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
depends_on:
postgresql:
condition: service_healthy
redis:
condition: service_healthy
worker:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2024.12.3}
restart: unless-stopped
command: worker
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY}
AUTHENTIK_EMAIL__HOST: ${AUTHENTIK_EMAIL__HOST}
AUTHENTIK_EMAIL__PORT: ${AUTHENTIK_EMAIL__PORT}
AUTHENTIK_EMAIL__USERNAME: ${AUTHENTIK_EMAIL__USERNAME}
AUTHENTIK_EMAIL__PASSWORD: ${AUTHENTIK_EMAIL__PASSWORD}
AUTHENTIK_EMAIL__USE_TLS: ${AUTHENTIK_EMAIL__USE_TLS}
AUTHENTIK_EMAIL__USE_SSL: ${AUTHENTIK_EMAIL__USE_SSL}
AUTHENTIK_EMAIL__TIMEOUT: ${AUTHENTIK_EMAIL__TIMEOUT}
AUTHENTIK_EMAIL__FROM: ${AUTHENTIK_EMAIL__FROM}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- media:/data/media
- certs:/certs
- custom-templates:/templates
depends_on:
postgresql:
condition: service_healthy
redis:
condition: service_healthy
authentik_ldap:
image: ghcr.io/goauthentik/ldap:${AUTHENTIK_TAG:-2024.12.3}
restart: unless-stopped
ports:
- 3389:3389
- 6636:6636
environment:
AUTHENTIK_HOST: https://auth.pcenicni.ca
AUTHENTIK_INSECURE: "false"
AUTHENTIK_TOKEN: 2OutrpIACRD41JdhjiZE6zSL8I48RpwkvnDRVbEPnllDnzdcxO9UJ26iS08Q
depends_on:
postgresql:
condition: service_healthy
volumes:
database:
driver: local
redis:
driver: local
media:
driver: local
certs:
driver: local
custom-templates:
driver: local

View File

@@ -0,0 +1,11 @@
POSTGRES_USER=coder
POSTGRES_PASSWORD=password
POSTGRES_DB=coder
CODER_ACCESS_URL=coder.example.com
CODER_OIDC_ISSUER_URL="https://auth.example.com/application/o/coder"
CODER_OIDC_EMAIL_DOMAIN="${CODER_OIDC_EMAIL_DOMAIN}"
CODER_OIDC_CLIENT_ID="${CODER_OIDC_CLIENT_ID}"
CODER_OIDC_CLIENT_SECRET="${CODER_OIDC_CLIENT_SECRET}"
CODER_OIDC_IGNORE_EMAIL_VERIFIED=true
CODER_OIDC_SIGN_IN_TEXT="Sign in with Authentik"
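
Before pointing Coder at the issuer, it can help to confirm the URL serves OIDC discovery metadata; `/.well-known/openid-configuration` is the standard discovery path, and `auth.example.com` mirrors the placeholder above:

```bash
curl -fsSL https://auth.example.com/application/o/coder/.well-known/openid-configuration \
  | jq '.issuer, .authorization_endpoint, .token_endpoint'
```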

View File

@@ -0,0 +1,65 @@
services:
coder:
# This MUST be stable for our documentation and
# other automations.
image: ${CODER_REPO:-ghcr.io/coder/coder}:${CODER_VERSION:-latest}
ports:
- "7080:7080"
environment:
CODER_PG_CONNECTION_URL: "postgresql://${POSTGRES_USER:-username}:${POSTGRES_PASSWORD:-password}@database/${POSTGRES_DB:-coder}?sslmode=disable"
CODER_HTTP_ADDRESS: "0.0.0.0:7080"
# You'll need to set CODER_ACCESS_URL to an IP or domain
# that workspaces can reach. This cannot be localhost
# or 127.0.0.1 for non-Docker templates!
CODER_ACCESS_URL: "${CODER_ACCESS_URL}"
# OpenID connect config
CODER_OIDC_ISSUER_URL: "${CODER_OIDC_ISSUER_URL}"
#CODER_OIDC_EMAIL_DOMAIN: "${CODER_OIDC_EMAIL_DOMAIN}"
CODER_OIDC_CLIENT_ID: "${CODER_OIDC_CLIENT_ID}"
CODER_OIDC_CLIENT_SECRET: "${CODER_OIDC_CLIENT_SECRET}"
CODER_OIDC_IGNORE_EMAIL_VERIFIED: true
CODER_OIDC_SIGN_IN_TEXT: "Sign in with Authentik"
CODER_OIDC_ICON_URL: https://authentik.company/static/dist/assets/icons/icon.png
CODER_OIDC_SCOPES: openid,profile,email,offline_access
# If the coder user does not have write permissions on
# the docker socket, you can uncomment the following
# lines and set the group ID to one that has write
# permissions on the docker socket.
group_add:
- "988" # docker group on host
volumes:
- /var/run/docker.sock:/var/run/docker.sock
# Run "docker volume rm coder_coder_home" to reset the dev tunnel url (https://abc.xyz.try.coder.app).
# This volume is not required in a production environment - you may safely remove it.
# Coder can recreate all the files it needs on restart.
- coder_home:/home/coder
depends_on:
database:
condition: service_healthy
database:
# Minimum supported version is 13.
# More versions here: https://hub.docker.com/_/postgres
image: "postgres:17"
# Uncomment the next two lines to allow connections to the database from outside the server.
#ports:
# - "5432:5432"
environment:
POSTGRES_USER: ${POSTGRES_USER:-username} # The PostgreSQL user (useful to connect to the database)
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password} # The PostgreSQL password (useful to connect to the database)
POSTGRES_DB: ${POSTGRES_DB:-coder} # The PostgreSQL default database (automatically created at first launch)
volumes:
- coder_data:/var/lib/postgresql/data # Use "docker volume rm coder_coder_data" to reset Coder
healthcheck:
test:
[
"CMD-SHELL",
"pg_isready -U ${POSTGRES_USER:-username} -d ${POSTGRES_DB:-coder}",
]
interval: 5s
timeout: 5s
retries: 5
volumes:
coder_data:
coder_home:
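
The `group_add` entry above hardcodes `988`, the docker group's GID on one particular host; the number differs between machines. Two ways to look it up on the Docker host before editing the compose file (plain Linux tooling, nothing Coder-specific):

```bash
getent group docker | cut -d: -f3
# or read the group owner of the socket itself
stat -c '%g' /var/run/docker.sock
```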

View File

@@ -0,0 +1,48 @@
# MySQL Configuration
MYSQL_ROOT_PASSWORD=change_this_root_password
MYSQL_DATABASE=fleet
MYSQL_USER=fleet
MYSQL_PASSWORD=change_this_fleet_password
# Fleet Server Configuration
# Generate a random key with: openssl rand -base64 32
FLEET_SERVER_PRIVATE_KEY=change_this_private_key
# Fleet HTTP Listener Configuration
FLEET_SERVER_ADDRESS=0.0.0.0
FLEET_SERVER_PORT=1337
# TLS Configuration
# Set to 'true' if Fleet handles TLS directly (requires certificates in ./certs/)
# Set to 'false' if using a reverse proxy or load balancer for TLS termination
FLEET_SERVER_TLS=false
# TLS Certificate paths (only needed if FLEET_SERVER_TLS=true)
FLEET_SERVER_CERT=/fleet/fleet.crt
FLEET_SERVER_KEY=/fleet/fleet.key
# Fleet License (optional - leave empty for free tier)
FLEET_LICENSE_KEY=
# Fleet Session & Logging
FLEET_SESSION_DURATION=24h
FLEET_LOGGING_JSON=true
# Fleet Osquery Configuration
FLEET_OSQUERY_STATUS_LOG_PLUGIN=filesystem
FLEET_FILESYSTEM_STATUS_LOG_FILE=/logs/osqueryd.status.log
FLEET_FILESYSTEM_RESULT_LOG_FILE=/logs/osqueryd.results.log
FLEET_OSQUERY_LABEL_UPDATE_INTERVAL=1h
# Fleet Vulnerabilities
FLEET_VULNERABILITIES_CURRENT_INSTANCE_CHECKS=yes
FLEET_VULNERABILITIES_DATABASES_PATH=/vulndb
FLEET_VULNERABILITIES_PERIODICITY=1h
# S3 Configuration (optional - leave empty if not using S3)
FLEET_S3_SOFTWARE_INSTALLERS_BUCKET=
FLEET_S3_SOFTWARE_INSTALLERS_ACCESS_KEY_ID=
FLEET_S3_SOFTWARE_INSTALLERS_SECRET_ACCESS_KEY=
FLEET_S3_SOFTWARE_INSTALLERS_FORCE_S3_PATH_STYLE=
FLEET_S3_SOFTWARE_INSTALLERS_ENDPOINT_URL=
FLEET_S3_SOFTWARE_INSTALLERS_REGION=

View File

@@ -0,0 +1,119 @@
volumes:
mysql:
redis:
data:
logs:
vulndb:
services:
mysql:
image: mysql:8
platform: linux/x86_64
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
volumes:
- mysql:/var/lib/mysql
cap_add:
- SYS_NICE
healthcheck:
test:
[
"CMD-SHELL",
"mysqladmin ping -h 127.0.0.1 -u$$MYSQL_USER -p$$MYSQL_PASSWORD --silent || exit 1",
]
interval: 10s
timeout: 5s
retries: 12
ports:
- "3306:3306"
restart: unless-stopped
redis:
image: redis:6
command: ["redis-server", "--appendonly", "yes"]
volumes:
- redis:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 12
ports:
- "6379:6379"
restart: unless-stopped
fleet-init:
image: alpine:latest
volumes:
- logs:/logs
- data:/data
- vulndb:/vulndb
command: sh -c "chown -R 100:101 /logs /data /vulndb"
fleet:
image: fleetdm/fleet
platform: linux/x86_64
depends_on:
mysql:
condition: service_healthy
redis:
condition: service_healthy
fleet-init:
condition: service_completed_successfully
command: sh -c "/usr/bin/fleet prepare db --no-prompt && /usr/bin/fleet serve"
environment:
# In-cluster service addresses (no hostnames/ports on the host)
- FLEET_REDIS_ADDRESS=redis:6379
- FLEET_MYSQL_ADDRESS=mysql:3306
- FLEET_MYSQL_DATABASE=${MYSQL_DATABASE}
- FLEET_MYSQL_USERNAME=${MYSQL_USER}
- FLEET_MYSQL_PASSWORD=${MYSQL_PASSWORD}
# Fleet HTTP listener
- FLEET_SERVER_ADDRESS=${FLEET_SERVER_ADDRESS}:${FLEET_SERVER_PORT}
- FLEET_SERVER_TLS=${FLEET_SERVER_TLS}
# TLS Certificate paths (only needed if FLEET_SERVER_TLS=true)
- FLEET_SERVER_CERT=${FLEET_SERVER_CERT}
- FLEET_SERVER_KEY=${FLEET_SERVER_KEY}
# Secrets
- FLEET_SERVER_PRIVATE_KEY=${FLEET_SERVER_PRIVATE_KEY} # Run 'openssl rand -base64 32' to generate
- FLEET_LICENSE_KEY=${FLEET_LICENSE_KEY}
# System tuning & other options
- FLEET_SESSION_DURATION=${FLEET_SESSION_DURATION}
- FLEET_LOGGING_JSON=${FLEET_LOGGING_JSON}
- FLEET_OSQUERY_STATUS_LOG_PLUGIN=${FLEET_OSQUERY_STATUS_LOG_PLUGIN}
- FLEET_FILESYSTEM_STATUS_LOG_FILE=${FLEET_FILESYSTEM_STATUS_LOG_FILE}
- FLEET_FILESYSTEM_RESULT_LOG_FILE=${FLEET_FILESYSTEM_RESULT_LOG_FILE}
- FLEET_OSQUERY_LABEL_UPDATE_INTERVAL=${FLEET_OSQUERY_LABEL_UPDATE_INTERVAL}
- FLEET_VULNERABILITIES_CURRENT_INSTANCE_CHECKS=${FLEET_VULNERABILITIES_CURRENT_INSTANCE_CHECKS}
- FLEET_VULNERABILITIES_DATABASES_PATH=${FLEET_VULNERABILITIES_DATABASES_PATH}
- FLEET_VULNERABILITIES_PERIODICITY=${FLEET_VULNERABILITIES_PERIODICITY}
# Optional S3 info
- FLEET_S3_SOFTWARE_INSTALLERS_BUCKET=${FLEET_S3_SOFTWARE_INSTALLERS_BUCKET}
- FLEET_S3_SOFTWARE_INSTALLERS_ACCESS_KEY_ID=${FLEET_S3_SOFTWARE_INSTALLERS_ACCESS_KEY_ID}
- FLEET_S3_SOFTWARE_INSTALLERS_SECRET_ACCESS_KEY=${FLEET_S3_SOFTWARE_INSTALLERS_SECRET_ACCESS_KEY}
- FLEET_S3_SOFTWARE_INSTALLERS_FORCE_S3_PATH_STYLE=${FLEET_S3_SOFTWARE_INSTALLERS_FORCE_S3_PATH_STYLE}
# Override FLEET_S3_SOFTWARE_INSTALLERS_ENDPOINT_URL when using a different S3 compatible
# object storage backend (such as RustFS) or running S3 locally with localstack.
# Leave this blank to use the default S3 service endpoint.
- FLEET_S3_SOFTWARE_INSTALLERS_ENDPOINT_URL=${FLEET_S3_SOFTWARE_INSTALLERS_ENDPOINT_URL}
# RustFS users should set FLEET_S3_SOFTWARE_INSTALLERS_REGION to any nonempty value (eg. localhost)
# to short-circuit region discovery
- FLEET_S3_SOFTWARE_INSTALLERS_REGION=${FLEET_S3_SOFTWARE_INSTALLERS_REGION}
ports:
- "${FLEET_SERVER_PORT}:${FLEET_SERVER_PORT}" # UI/API
volumes:
- data:/fleet
- logs:/logs
- vulndb:${FLEET_VULNERABILITIES_DATABASES_PATH}
# - ./certs/fleet.crt:/fleet/fleet.crt:ro
# - ./certs/fleet.key:/fleet/fleet.key:ro
healthcheck:
test:
["CMD", "wget", "-qO-", "http://127.0.0.1:${FLEET_SERVER_PORT}/healthz"]
interval: 10s
timeout: 5s
retries: 12
restart: unless-stopped
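
A minimal bring-up check, assuming `.env.sample` has been copied to `.env` and `FLEET_SERVER_PORT` is left at 1337; `/healthz` is the same endpoint the container healthcheck polls:

```bash
docker compose up -d
curl -fsS http://localhost:1337/healthz && echo "Fleet is healthy"
```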

View File

@@ -0,0 +1,9 @@
OIDC_CLIENT_ID=your_oidc_client_id
OIDC_CLIENT_SECRET=your_oidc_client_secret
OIDC_ISSUER_URL=https://your-oidc-provider.com/application/o/termix/ # The base URL of your OIDC provider for this application. This is used to discover the authorization, token, and userinfo endpoints. It should end with a slash.
OIDC_AUTHORIZATION_URL=https://your-oidc-provider.com/application/o/authorize
OIDC_TOKEN_URL=https://your-oidc-provider.com/application/o/token
OIDC_USERINFO_URL=https://your-oidc-provider.com/application/o/userinfo
OIDC_IDENTIFIER_PATH=sub # The path in the OIDC userinfo response that contains the unique user identifier (default is 'sub')
OIDC_NAME_PATH=name # The path in the OIDC userinfo response that contains the user's display name (default is 'name')
OIDC_SCOPES=openid profile email

View File

@@ -0,0 +1,25 @@
---
services:
termix:
container_name: termix
image: ghcr.io/lukegus/termix:latest
restart: unless-stopped
ports:
- 8180:8080
volumes:
- termix-data:/app/data
environment:
PORT: 8080
OIDC_CLIENT_ID: ${OIDC_CLIENT_ID}
OIDC_CLIENT_SECRET: ${OIDC_CLIENT_SECRET}
OIDC_ISSUER_URL: ${OIDC_ISSUER_URL}
OIDC_AUTHORIZATION_URL: ${OIDC_AUTHORIZATION_URL}
OIDC_TOKEN_URL: ${OIDC_TOKEN_URL}
OIDC_USERINFO_URL: ${OIDC_USERINFO_URL}
OIDC_IDENTIFIER_PATH: ${OIDC_IDENTIFIER_PATH}
OIDC_NAME_PATH: ${OIDC_NAME_PATH}
OIDC_SCOPES: ${OIDC_SCOPES}
volumes:
termix-data:
driver: local
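
A quick way to confirm the OIDC settings from `.env` reach the container; the `termix` container name comes from the compose file above:

```bash
docker compose up -d
docker exec termix env | grep '^OIDC_'
```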

View File

@@ -0,0 +1,13 @@
services:
watchstate:
image: ghcr.io/arabcoders/watchstate:latest
# To change the user/group id associated with the tool change the following line.
user: "${UID:-1000}:${UID:-1000}"
container_name: watchstate
restart: unless-stopped
ports:
- "8080:8080" # The port which the watchstate will listen on.
volumes:
- watchstate:/config:rw # mount ./data in current directory to container /config directory.
volumes:
watchstate:
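
Compose interpolates `${UID:-1000}` from the environment or a `.env` file, but bash keeps `UID` as a read-only, unexported shell variable, so Compose usually falls back to the default. One way to pin it, assuming a `.env` file sits next to this compose file:

```bash
# Record the current user's UID where Compose can read it
echo "UID=$(id -u)" >> .env
docker compose up -d
```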

View File

@@ -0,0 +1,58 @@
# 🌐 Frontend
PUBLIC_SERVER_URL=http://server:8000 # PLEASE DON'T CHANGE :) - Should be the service name of the backend with port 8000, even if you change the port in the backend service. Only change if you are using a custom more complex setup.
ORIGIN=http://localhost:8015
BODY_SIZE_LIMIT=Infinity
FRONTEND_PORT=8015
# 🐘 PostgreSQL Database
PGHOST=db
POSTGRES_DB=database
POSTGRES_USER=adventure
POSTGRES_PASSWORD=changeme123
# 🔒 Django Backend
SECRET_KEY=changeme123
DJANGO_ADMIN_USERNAME=admin
DJANGO_ADMIN_PASSWORD=admin
DJANGO_ADMIN_EMAIL=admin@example.com
PUBLIC_URL=http://localhost:8016 # Match the outward port, used for the creation of image urls
CSRF_TRUSTED_ORIGINS=http://localhost:8016,http://localhost:8015
DEBUG=False
FRONTEND_URL=http://localhost:8015 # Used for email generation. This should be the url of the frontend
BACKEND_PORT=8016
# Optional: use Google Maps integration
# https://adventurelog.app/docs/configuration/google_maps_integration.html
# GOOGLE_MAPS_API_KEY=your_google_maps_api_key
# Optional: disable registration
# https://adventurelog.app/docs/configuration/disable_registration.html
DISABLE_REGISTRATION=False
# DISABLE_REGISTRATION_MESSAGE=Registration is disabled for this instance of AdventureLog.
# SOCIALACCOUNT_ALLOW_SIGNUP=False # When false, social providers cannot be used to create new user accounts when registration is disabled.
# FORCE_SOCIALACCOUNT_LOGIN=False # When true, only social login is allowed (no password login) and the login page will show only social providers or redirect directly to the first provider if only one is configured.
# ACCOUNT_EMAIL_VERIFICATION='none' # 'none', 'optional', 'mandatory' # You can change this as needed for your environment
# Optional: Use email
# https://adventurelog.app/docs/configuration/email.html
# EMAIL_BACKEND=email
# EMAIL_HOST=smtp.gmail.com
# EMAIL_USE_TLS=True
# EMAIL_PORT=587
# EMAIL_USE_SSL=False
# EMAIL_HOST_USER=user
# EMAIL_HOST_PASSWORD=password
# DEFAULT_FROM_EMAIL=user@example.com
# Optional: Use Strava integration
# https://adventurelog.app/docs/configuration/strava_integration.html
# STRAVA_CLIENT_ID=your_strava_client_id
# STRAVA_CLIENT_SECRET=your_strava_client_secret
# Optional: Use Umami for analytics
# https://adventurelog.app/docs/configuration/analytics.html
# PUBLIC_UMAMI_SRC=https://cloud.umami.is/script.js # If you are using the hosted version of Umami
# PUBLIC_UMAMI_WEBSITE_ID=

View File

@@ -0,0 +1,36 @@
services:
web:
#build: ./frontend/
image: ghcr.io/seanmorley15/adventurelog-frontend:latest
container_name: adventurelog-frontend
restart: unless-stopped
env_file: .env
ports:
- "${FRONTEND_PORT:-8015}:3000"
depends_on:
- server
db:
image: postgis/postgis:16-3.5
container_name: adventurelog-db
restart: unless-stopped
env_file: .env
volumes:
- postgres_data:/var/lib/postgresql/data/
server:
#build: ./backend/
image: ghcr.io/seanmorley15/adventurelog-backend:latest
container_name: adventurelog-backend
restart: unless-stopped
env_file: .env
ports:
- "${BACKEND_PORT:-8016}:80"
depends_on:
- db
volumes:
- adventurelog_media:/code/media/
volumes:
postgres_data:
adventurelog_media:

View File

@@ -1,9 +0,0 @@
services:
ossm-configurator:
image: ghcr.io/munchdev-oss/ossm-configurator:latest
container_name: ossm-configurator
ports:
- "2121:80"
restart: unless-stopped
environment:
- NODE_ENV=production

View File

@@ -0,0 +1,146 @@
# SparkyFitness Environment Variables
# Copy this file to .env in the root directory and fill in your own values before running 'docker-compose up'.
# --- PostgreSQL Database Settings ---
# These values should match the ones used by your PostgreSQL container.
# For local development (running Node.js directly), use 'localhost' or '127.0.0.1' if PostgreSQL is on your host.
SPARKY_FITNESS_DB_NAME=sparkyfitness_db
#SPARKY_FITNESS_DB_USER is super user for DB initialization and migrations.
SPARKY_FITNESS_DB_USER=sparky
SPARKY_FITNESS_DB_PASSWORD=changeme_db_password
# Application database user with limited privileges. it can be changed any time after initialization.
SPARKY_FITNESS_APP_DB_USER=sparky_app
SPARKY_FITNESS_APP_DB_PASSWORD=password
# For Docker Compose deployments, SPARKY_FITNESS_DB_HOST will be the service name (e.g., 'sparkyfitness-db').
SPARKY_FITNESS_DB_HOST=sparkyfitness-db
#SPARKY_FITNESS_DB_PORT=5432 # Optional. Defaults to 5432 if not specified.
# --- Backend Server Settings ---
# The hostname or IP address of the backend server.
# For Docker Compose, this is typically the service name (e.g., 'sparkyfitness-server').
# For local development or other deployments, this might be 'localhost' or a specific IP.
SPARKY_FITNESS_SERVER_HOST=sparkyfitness-server
# The external port the server will be exposed on.
SPARKY_FITNESS_SERVER_PORT=3010
# The public URL of your frontend (e.g., https://fitness.example.com). This is crucial for CORS security.
# For local development, use http://localhost:8080. For production, use your domain with https.
SPARKY_FITNESS_FRONTEND_URL=http://localhost:8080
# Allow CORS requests from private network addresses (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, localhost, etc.)
# SECURITY WARNING: Only enable this if you are running on a private/self-hosted network.
# Do NOT enable on shared hosting or cloud environments where other users might access your network.
# Default: false (secure default - only the configured SPARKY_FITNESS_FRONTEND_URL is allowed)
#ALLOW_PRIVATE_NETWORK_CORS=false
# A comma-separated list of additional URLs that Better Auth should trust.
# This is useful when accessing the app from a specific local IP on your network.
# Example: SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS=http://192.168.1.175:8080,http://10.0.0.5:8080
# SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS=
# Logging level for the server (e.g., INFO, DEBUG, WARN, ERROR)
SPARKY_FITNESS_LOG_LEVEL=ERROR
# Node.js environment mode (e.g., development, production, test)
# Set to 'production' for deployment to ensure optimal performance and security.
NODE_ENV=production
# Server timezone. Use a TZ database name (e.g., 'America/New_York', 'Etc/UTC').
# This affects how dates/times are handled by the server if not explicitly managed in code.
TZ=Etc/UTC
# --- Security Settings ---
# A 64-character hex string for data encryption.
# You can generate a secure key with the following command:
# openssl rand -hex 32
# or
# node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
# Changing this will invalidate existing encrypted data. You will need to delete and add External Data sources again.
SPARKY_FITNESS_API_ENCRYPTION_KEY=changeme_replace_with_a_64_character_hex_string
# For Docker Swarm/Kubernetes secrets, you can use a file-based secret:
# SPARKY_FITNESS_API_ENCRYPTION_KEY_FILE=/run/secrets/sparkyfitness_api_key
BETTER_AUTH_SECRET=changeme_replace_with_a_strong_better_auth_secret
# For Docker Swarm/Kubernetes secrets, you can use a file-based secret:
# BETTER_AUTH_SECRET_FILE=/run/secrets/sparkyfitness_better_auth_secret
# --- Signup Settings ---
# Set to 'true' to disable new user registrations.
SPARKY_FITNESS_DISABLE_SIGNUP=false
# --- Admin Settings ---
# Set the email of a user to automatically grant admin privileges on server startup.
# This is useful for development or initial setup.
# Example: SPARKY_FITNESS_ADMIN_EMAIL=admin@example.com
# Optional. If not set, no admin user will be created automatically.
# SPARKY_FITNESS_ADMIN_EMAIL=
# --- Login Management Fail-Safe ---
# Set to 'true' to force email/password login to be enabled, overriding any in-app settings.
# This is a fail-safe to prevent being locked out if OIDC is misconfigured.
SPARKY_FITNESS_FORCE_EMAIL_LOGIN=true
# --- Email Settings (Optional) ---
# Configure these variables if you want to enable email notifications (e.g., for password resets).
# If not configured, email functionality will be disabled.
# SPARKY_FITNESS_EMAIL_HOST=smtp.example.com
# SPARKY_FITNESS_EMAIL_PORT=587
# SPARKY_FITNESS_EMAIL_SECURE=true # Use 'true' for TLS/SSL, 'false' for plain text
# SPARKY_FITNESS_EMAIL_USER=your_email@example.com
# SPARKY_FITNESS_EMAIL_PASS=your_email_password
# SPARKY_FITNESS_EMAIL_FROM=no-reply@example.com
# --- Volume Paths (Optional) ---
# These paths define where Docker volumes will store persistent data on your host.
# If not set, Docker will manage volumes automatically in its default location.
# DB_PATH=../postgresql # Path for PostgreSQL database data
# SERVER_BACKUP_PATH=./backup # Path for server backups
# SERVER_UPLOADS_PATH=./uploads # Path for profile pictures and exercise images
# --- API Key Rate Limiting (Optional) ---
# Override the default rate limit for API key authentication (used by automation tools like n8n).
# Defaults to 100 requests per 60-second window if not set.
#SPARKY_FITNESS_API_KEY_RATELIMIT_WINDOW_MS=60000
#SPARKY_FITNESS_API_KEY_RATELIMIT_MAX_REQUESTS=100
# --- Start of Garmin Integration Settings ---
#Below variables are needed only for Garmin integration. If you don't use Garmin integration, you can remove them in your .env file.
# The URL for the Garmin microservice.
# For Docker Compose, this would typically be the service name and port (e.g., 'http://sparkyfitness-garmin:8000').
# For local development, use 'http://localhost:8000' or the port you've configured.
GARMIN_MICROSERVICE_URL=http://sparkyfitness-garmin:8000
# This is used for Garmin Connect synchronization.
# If you are not using Garmin integration, you don't need this. Make sure this matches with GARMIN_MICROSERVICE_URL.
GARMIN_SERVICE_PORT=8000
# set to true for China region. Everything else should be false. Optional - defaults to false
GARMIN_SERVICE_IS_CN=false
# --- End of Garmin Integration Settings ---
#----- Developers Section -----
# Data source for external integrations (fitbit, garmin, withings).
# By default, these use live APIs. Set to 'local' to use mock data from the mock_data directory.
#SPARKY_FITNESS_FITBIT_DATA_SOURCE=local
#SPARKY_FITNESS_WITHINGS_DATA_SOURCE=local
#SPARKY_FITNESS_GARMIN_DATA_SOURCE=local
#SPARKY_FITNESS_POLAR_DATA_SOURCE=local
#SPARKY_FITNESS_HEVY_DATA_SOURCE=local
# Set to 'true' to capture live API responses into mock data JSON files. Defaults to false.
#SPARKY_FITNESS_SAVE_MOCK_DATA=false
#-----------------------------

View File

@@ -0,0 +1,85 @@
services:
sparkyfitness-db:
image: postgres:15-alpine
restart: always
environment:
POSTGRES_DB: ${SPARKY_FITNESS_DB_NAME:?Variable is required and must be set}
POSTGRES_USER: ${SPARKY_FITNESS_DB_USER:?Variable is required and must be set}
POSTGRES_PASSWORD: ${SPARKY_FITNESS_DB_PASSWORD:?Variable is required and must be set}
volumes:
- ${DB_PATH:-../postgresql}:/var/lib/postgresql/data
networks:
- sparkyfitness-network # Use the new named network
sparkyfitness-server:
image: codewithcj/sparkyfitness_server:latest # Use pre-built image
environment:
SPARKY_FITNESS_LOG_LEVEL: ${SPARKY_FITNESS_LOG_LEVEL}
ALLOW_PRIVATE_NETWORK_CORS: ${ALLOW_PRIVATE_NETWORK_CORS:-false}
SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS: ${SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS:-}
SPARKY_FITNESS_DB_USER: ${SPARKY_FITNESS_DB_USER:-sparky}
SPARKY_FITNESS_DB_HOST: ${SPARKY_FITNESS_DB_HOST:-sparkyfitness-db} # Use the service name 'sparkyfitness-db' for inter-container communication
SPARKY_FITNESS_DB_NAME: ${SPARKY_FITNESS_DB_NAME}
SPARKY_FITNESS_DB_PASSWORD: ${SPARKY_FITNESS_DB_PASSWORD:?Variable is required and must be set}
SPARKY_FITNESS_APP_DB_USER: ${SPARKY_FITNESS_APP_DB_USER:-sparkyapp}
SPARKY_FITNESS_APP_DB_PASSWORD: ${SPARKY_FITNESS_APP_DB_PASSWORD:?Variable is required and must be set}
SPARKY_FITNESS_DB_PORT: ${SPARKY_FITNESS_DB_PORT:-5432}
SPARKY_FITNESS_API_ENCRYPTION_KEY: ${SPARKY_FITNESS_API_ENCRYPTION_KEY:?Variable is required and must be set}
# Uncomment the line below and comment the line above to use a file-based secret
# SPARKY_FITNESS_API_ENCRYPTION_KEY_FILE: /run/secrets/sparkyfitness_api_key
BETTER_AUTH_SECRET: ${BETTER_AUTH_SECRET:?Variable is required and must be set}
# Uncomment the line below and comment the line above to use a file-based secret
# JWT_SECRET_FILE: /run/secrets/sparkyfitness_jwt_secret
SPARKY_FITNESS_FRONTEND_URL: ${SPARKY_FITNESS_FRONTEND_URL:-http://0.0.0.0:3004}
SPARKY_FITNESS_DISABLE_SIGNUP: ${SPARKY_FITNESS_DISABLE_SIGNUP}
SPARKY_FITNESS_ADMIN_EMAIL: ${SPARKY_FITNESS_ADMIN_EMAIL} #User with this email can access the admin panel
SPARKY_FITNESS_EMAIL_HOST: ${SPARKY_FITNESS_EMAIL_HOST}
SPARKY_FITNESS_EMAIL_PORT: ${SPARKY_FITNESS_EMAIL_PORT}
SPARKY_FITNESS_EMAIL_SECURE: ${SPARKY_FITNESS_EMAIL_SECURE}
SPARKY_FITNESS_EMAIL_USER: ${SPARKY_FITNESS_EMAIL_USER}
SPARKY_FITNESS_EMAIL_PASS: ${SPARKY_FITNESS_EMAIL_PASS}
SPARKY_FITNESS_EMAIL_FROM: ${SPARKY_FITNESS_EMAIL_FROM}
GARMIN_MICROSERVICE_URL: http://sparkyfitness-garmin:8000 # Add Garmin microservice URL
networks:
- sparkyfitness-network # Use the new named network
restart: always
depends_on:
- sparkyfitness-db # Backend depends on the database being available
volumes:
- ${SERVER_BACKUP_PATH:-./backup}:/app/SparkyFitnessServer/backup # Mount volume for backups
- ${SERVER_UPLOADS_PATH:-./uploads}:/app/SparkyFitnessServer/uploads # Mount volume for profile pictures and exercise images
sparkyfitness-frontend:
image: codewithcj/sparkyfitness:latest # Use pre-built image
ports:
- "3004:80" # Map host port 3004 to container port 80 (Nginx)
environment:
SPARKY_FITNESS_FRONTEND_URL: ${SPARKY_FITNESS_FRONTEND_URL}
SPARKY_FITNESS_SERVER_HOST: sparkyfitness-server # Internal Docker service name for the backend
SPARKY_FITNESS_SERVER_PORT: 3010 # Port the backend server listens on
networks:
- sparkyfitness-network # Use the new named network
restart: always
depends_on:
- sparkyfitness-server # Frontend depends on the server
#- sparkyfitness-garmin # Frontend depends on Garmin microservice. Enable if you are using Garmin Connect features.
# Garmin integration is still work in progress. Enable once table is ready.
# sparkyfitness-garmin:
# image: codewithcj/sparkyfitness_garmin:latest
# container_name: sparkyfitness-garmin
# environment:
# GARMIN_MICROSERVICE_URL: http://sparkyfitness-garmin:${GARMIN_SERVICE_PORT}
# GARMIN_SERVICE_PORT: ${GARMIN_SERVICE_PORT}
# GARMIN_SERVICE_IS_CN: ${GARMIN_SERVICE_IS_CN} # set to true for China region. Everything else should be false. Optional - defaults to false
# networks:
# - sparkyfitness-network
# restart: unless-stopped
# depends_on:
# - sparkyfitness-db
# - sparkyfitness-server
networks:
sparkyfitness-network:
driver: bridge

View File

@@ -8,8 +8,10 @@ services:
       - PGID=${PGID}
       - TZ=Canada/Eastern
     volumes:
-      - ${CONFIG_PATH}/jellyfin:/config
+      - jellyfin:/config
       - ${DATA_PATH}/media/:/data/media
     ports:
       - 8096:8096
     restart: unless-stopped
+volumes:
+  jellyfin:
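
Because this change swaps the `/config` bind mount for a named volume, the existing Jellyfin configuration has to be copied into the new volume once, or Jellyfin starts empty. A hedged one-off migration, assuming the service is named `jellyfin` and the volume is created as `<project>_jellyfin` (check with `docker volume ls`):

```bash
docker compose stop jellyfin
docker run --rm \
  -v "<CONFIG_PATH>/jellyfin:/old:ro" \
  -v "<project>_jellyfin:/new" \
  alpine sh -c 'cp -a /old/. /new/'
docker compose up -d jellyfin
```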

259
macos/Jellyfin-NFS.md Normal file
View File

@@ -0,0 +1,259 @@
# Jellyfin + macOS: Persistent NFS Mount (Fix for Libraries Randomly “Clearing”)
This README documents the working fix I applied when Jellyfin (on a Mac mini) periodically “lost” or cleared my Movies/TV libraries that live on a NAS mounted over NFS.
It includes the exact commands, files, and rationale so I can reproduce it later.
---
## Problem Summary
- Symptom: Every day or two, Jellyfin showed empty Movies/TV libraries.
- Media location: NFS share at `/Volumes/media` from NAS `192.168.50.105:/media`.
- Root cause: macOS was using autofs (`/- /etc/auto_nfs`). autofs can unmount after inactivity or brief network blips. When the mount disappears during a Jellyfin scan/file-watch, Jellyfin sees files as missing and removes them from its DB.
## Solution Summary
- Stop using autofs for this path.
- Create a persistent mount at boot using a LaunchDaemon and a small network-aware mount script.
- The script:
- Is idempotent: does nothing if already mounted.
- Checks NAS reachability first.
- Logs to `/var/log/mount_media.(out|err)`.
- Optionally restarts Jellyfin (Homebrew service) if the mount comes back.
---
## Prerequisites / Assumptions
- macOS with admin (sudo) access.
- NFS server: `192.168.50.105` exporting `/media` (adjust as needed).
- Mount point: `/Volumes/media` (adjust as needed).
- Jellyfin installed (optional Homebrew service restart in script).
> Tip: If your NAS requires privileged source ports for NFSv4, `resvport` helps. The script falls back to `noresvport` if needed.
---
## Steps (copy/paste commands)
### 1) Disable autofs for this path and unmount any automounted share
```
# Backup and comment out the direct map for NFS
sudo cp /etc/auto_master /etc/auto_master.bak.$(date +%F_%H%M%S)
sudo sed -i.bak 's|^/- /etc/auto_nfs|#/- /etc/auto_nfs|' /etc/auto_master
# Reload automountd (will unmount /Volumes/media if it was automounted)
sudo automount -vc
# Ensure the mountpoint is not currently mounted (ignore errors if already unmounted)
sudo umount /Volumes/media 2>/dev/null || sudo umount -f /Volumes/media 2>/dev/null || true
```
> Note: If `chown`/`chmod` say “Operation not permitted,” the path is still mounted (or your NAS has root-squash). Unmount first.
---
### 2) Create the network-aware mount script
```
sudo mkdir -p /usr/local/sbin
sudo tee /usr/local/sbin/mount_media_nfs.sh > /dev/null <<'SH'
#!/bin/sh
set -eu
LOG="/var/log/mount_media.out"
ERR="/var/log/mount_media.err"
MOUNT="/Volumes/media"
SERVER="192.168.50.105:/media"
HOST="${SERVER%%:*}"
# Ensure mountpoint exists
[ -d "$MOUNT" ] || mkdir -p "$MOUNT"
# If already mounted as NFS, exit quietly
if mount -t nfs | awk '{print $3}' | grep -qx "$MOUNT"; then
echo "$(date) already mounted: $MOUNT" >>"$LOG"
exit 0
fi
# Preflight: only try to mount when NFS port is reachable
if ! /usr/bin/nc -G 2 -z "$HOST" 2049 >/dev/null 2>&1; then
echo "$(date) NAS not reachable on 2049, skipping mount" >>"$LOG"
exit 0
fi
echo "$(date) mounting $SERVER -> $MOUNT" >>"$LOG"
/sbin/mount -t nfs -o resvport,hard,nfsvers=4.0 "$SERVER" "$MOUNT" >>"$LOG" 2>>"$ERR" || \
/sbin/mount -t nfs -o noresvport,hard,nfsvers=4.0 "$SERVER" "$MOUNT" >>"$LOG" 2>>"$ERR"
# Verify mount succeeded via mount(8)
if mount -t nfs | awk '{print $3}' | grep -qx "$MOUNT"; then
echo "$(date) mount OK: $MOUNT" >>"$LOG"
# Optional: restart Jellyfin if installed via Homebrew
if command -v brew >/dev/null 2>&1 && brew services list | grep -q '^jellyfin\b'; then
echo "$(date) restarting Jellyfin (brew services)" >>"$LOG"
brew services restart jellyfin >>"$LOG" 2>>"$ERR" || true
fi
else
echo "$(date) mount FAILED" >>"$ERR"
exit 1
fi
SH
sudo chmod 755 /usr/local/sbin/mount_media_nfs.sh
```
---
### 3) Create the LaunchDaemon (mount at boot, re-check periodically, network-aware)
```
sudo tee /Library/LaunchDaemons/com.local.mountmedia.plist > /dev/null <<'PLIST'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.local.mountmedia</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/sbin/mount_media_nfs.sh</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StartInterval</key>
<integer>300</integer>
<key>KeepAlive</key>
<dict>
<key>NetworkState</key>
<true/>
</dict>
<key>StandardOutPath</key>
<string>/var/log/mount_media.out</string>
<key>StandardErrorPath</key>
<string>/var/log/mount_media.err</string>
</dict>
</plist>
PLIST
sudo chown root:wheel /Library/LaunchDaemons/com.local.mountmedia.plist
sudo chmod 644 /Library/LaunchDaemons/com.local.mountmedia.plist
sudo plutil -lint /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl bootout system /Library/LaunchDaemons/com.local.mountmedia.plist 2>/dev/null || true
sudo launchctl bootstrap system /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl enable system/com.local.mountmedia
sudo launchctl kickstart -k system/com.local.mountmedia
```
---
### 4) Run once and verify
```
# Run once now (idempotent; logs "already mounted" if present)
sudo /usr/local/sbin/mount_media_nfs.sh
# LaunchDaemon status
sudo launchctl print system/com.local.mountmedia | egrep 'state|last exit|PID' || true
# Mount status (should NOT say "automounted")
mount | grep " on /Volumes/media "
# NFS mount parameters
nfsstat -m | sed -n '/\/Volumes\/media/,+12p'
# Script logs
tail -n 100 /var/log/mount_media.out /var/log/mount_media.err 2>/dev/null || true
```
---
### 5) Jellyfin settings
- Temporarily disable “Enable real-time monitoring” for libraries under `/Volumes/media` until you confirm the mount stays stable.
- Then run “Scan Library Files” to repopulate anything previously removed.
---
### 6) Reboot test (recommended)
```
sudo shutdown -r now
```
After reboot:
```
mount | grep " on /Volumes/media " || echo "Not mounted yet"
sudo launchctl print system/com.local.mountmedia | egrep 'state|last exit' || true
tail -n 100 /var/log/mount_media.out /var/log/mount_media.err 2>/dev/null || true
```
---
## Rationale for Key Choices
- Persistent mount (LaunchDaemon) instead of autofs:
- autofs can unmount after inactivity; Jellyfin then removes items it thinks are gone.
- LaunchDaemon ensures the mount is present before scans and remains mounted.
- NFS options:
- `hard`: Blocks I/O until server responds, avoiding spurious “file missing” errors.
- `nfsvers=4.0`: Matches typical NAS defaults and the client's chosen version.
- `resvport` then fallback `noresvport`: Some servers require privileged ports; the script tries both.
- Network preflight:
- Check TCP/2049 reachability to avoid “Network is unreachable” failures (exit code 51) at boot or during link flaps.
- Logging:
- `/var/log/mount_media.out` and `.err` make it easy to correlate with Jellyfin logs.
---
## Troubleshooting
- “Operation not permitted” when `chown`/`chmod`:
- The path is mounted over NFS, and root-squash likely prevents ownership changes from the client. Unmount first or change ownership on the NAS.
- LaunchDaemon errors:
- Validate plist: `sudo plutil -lint /Library/LaunchDaemons/com.local.mountmedia.plist`
- Service state: `sudo launchctl print system/com.local.mountmedia`
- Mount health:
- `nfsstat -m` should show vers=4.0, hard, resvport/noresvport.
- Network/power:
- Prevent system sleep that drops the NIC; enable “Wake for network access.”
---
## Optional: If you must keep autofs
Increase the autofs timeout so it doesn't unmount on brief inactivity (less ideal than the LaunchDaemon approach):
```
sudo cp /etc/auto_master /etc/auto_master.bak.$(date +%F_%H%M%S)
sudo sed -i.bak -E 's|^/-[[:space:]]+/etc/auto_nfs$|/- -timeout=604800 /etc/auto_nfs|' /etc/auto_master
sudo automount -vc
```
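The sed only rewrites a line that matches the stock `/- /etc/auto_nfs` entry exactly, so verify the result before relying on it:
```
# The direct map line should now carry the timeout
grep auto_nfs /etc/auto_master
# Expected (roughly): /- -timeout=604800 /etc/auto_nfs
```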
---
## Reverting
To revert to autofs:
```
# Stop and remove LaunchDaemon
sudo launchctl bootout system /Library/LaunchDaemons/com.local.mountmedia.plist
sudo rm -f /Library/LaunchDaemons/com.local.mountmedia.plist
# Restore /etc/auto_master (uncomment direct map) and reload
sudo sed -i.bak 's|^#/- /etc/auto_nfs|/- /etc/auto_nfs|' /etc/auto_master
sudo automount -vc
```
---
## Notes
- Change permissions/ownership on the NFS export from the NAS, not the macOS client (root-squash).
- `showmount` may fail against NFSv4-only servers; it's not needed here.
- Adjust `SERVER`, `MOUNT`, and `StartInterval` to suit your environment.

316
macos/Jellyfin-SMB.md Normal file
View File

@@ -0,0 +1,316 @@
# Jellyfin + macOS: Persistent SMB Mount (Fix for Libraries Randomly “Clearing”)
This README documents the working fix I applied when Jellyfin (on a Mac mini) periodically “lost” or cleared my Movies/TV libraries that live on a NAS share (previously mounted over NFS via autofs).
This variant mounts the share over SMB instead; it includes the exact commands, files, and rationale so I can reproduce it later.
---
## Problem Summary
- Symptom: Every day or two, Jellyfin showed empty Movies/TV libraries.
- Media location: NFS share at `/Volumes/media` from NAS `192.168.50.105:/media`.
- Root cause: macOS was using autofs (`/- /etc/auto_nfs`). autofs can unmount after inactivity or brief network blips. When the mount disappears during a Jellyfin scan/file-watch, Jellyfin sees files as missing and removes them from its DB.
## Solution Summary
- Stop using autofs for this path.
- Create a persistent SMB mount at boot using a LaunchDaemon and a small network-aware mount script.
- The script:
- Is idempotent: does nothing if already mounted.
- Checks NAS reachability first (SMB ports 445/139).
- Tries an authenticated mount when credentials are available (environment or Keychain), otherwise falls back to a guest/null session.
- Logs to `/var/log/mount_media.(out|err)`.
- Optionally restarts Jellyfin (Homebrew service) if the mount comes back.
---
## Prerequisites / Assumptions
- macOS with admin (sudo) access.
- SMB server: NAS reachable by hostname (e.g. `nas.example.local`) sharing `media`; set `HOST` and `SHARE` in the script accordingly.
- Mount point: `/Volumes/media` (adjust as needed).
- Jellyfin installed (optional Homebrew service restart in script).
> Tip: The script can mount as an authenticated user (`SMB_USER`, with the password taken from the environment or the macOS Keychain) and falls back to a guest/null session when no credentials are available.
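If you want the authenticated path without leaving a password in the environment, one option is to store it in the System keychain, which a root LaunchDaemon should be able to read via the script's `security find-generic-password` lookup (a sketch; `nas-smb` and `jellyfinsmb` are placeholder service/account names and must match `SMB_KEYCHAIN_ITEM` and `SMB_USER`):
```
# Store the share password in the System keychain (service/account must match SMB_KEYCHAIN_ITEM/SMB_USER)
# Note: the password appears on the command line here; clear your shell history afterwards if that matters.
sudo security add-generic-password -s "nas-smb" -a "jellyfinsmb" -w 'YOUR_SMB_PASSWORD' /Library/Keychains/System.keychain
# Confirm it can be read back the same way the script does
sudo security find-generic-password -s "nas-smb" -a "jellyfinsmb" -w /Library/Keychains/System.keychain
```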
---
## Steps (copy/paste commands)
### 1) Disable autofs for this path and unmount any automounted share
```
# Backup and comment out the direct map for NFS
sudo cp /etc/auto_master /etc/auto_master.bak.$(date +%F_%H%M%S)
sudo sed -i.bak 's|^/- /etc/auto_nfs|#/- /etc/auto_nfs|' /etc/auto_master
# Reload automountd (will unmount /Volumes/media if it was automounted)
sudo automount -vc
# Ensure the mountpoint is not currently mounted (ignore errors if already unmounted)
sudo umount /Volumes/media 2>/dev/null || sudo umount -f /Volumes/media 2>/dev/null || true
```
> Note: If `chown`/`chmod` say “Operation not permitted,” the path is still mounted (or your NAS has root-squash). Unmount first.
---
### 2) Create the network-aware mount script
```
sudo mkdir -p /usr/local/sbin
sudo tee /usr/local/sbin/mount_media_smb.sh > /dev/null <<'SH'
#!/bin/sh
set -eu
LOG="/var/log/mount_media.out"
ERR="/var/log/mount_media.err"
MOUNT="/Volumes/media"
# SMB server settings — use domain name (FQDN)
HOST="nas.example.local" # <- change to your domain
SHARE="media" # <- change share name if needed
# Optional auth:
# - If SMB_USER is set, script will try authenticated mount.
# - Supply SMB_PASS (environment) OR set SMB_KEYCHAIN_ITEM to fetch password from Keychain.
SMB_USER="${SMB_USER:-}"
SMB_PASS="${SMB_PASS:-}"
SMB_KEYCHAIN_ITEM="${SMB_KEYCHAIN_ITEM:-}"
# Ensure mountpoint exists
[ -d "$MOUNT" ] || mkdir -p "$MOUNT"
# If already mounted on the mountpoint, exit quietly
if mount | awk '{print $3}' | grep -qx "$MOUNT"; then
echo "$(date) already mounted: $MOUNT" >>"$LOG"
exit 0
fi
# Preflight: only try to mount when SMB port is reachable (try 445 then 139)
if ! ( /usr/bin/nc -G 2 -z "$HOST" 445 >/dev/null 2>&1 || /usr/bin/nc -G 2 -z "$HOST" 139 >/dev/null 2>&1 ); then
echo "$(date) NAS not reachable on SMB ports (445/139), skipping mount" >>"$LOG"
exit 0
fi
# Helpful server listing for debugging (doesn't include credentials)
echo "$(date) smbutil listing for debugging" >>"$LOG" 2>>"$ERR"
smbutil view "//$HOST" >>"$LOG" 2>>"$ERR" || true
smbutil view "//guest@$HOST" >>"$LOG" 2>>"$ERR" || true
# Helper: function to verify mount and restart Jellyfin if needed
verify_and_exit() {
if mount | awk '{print $3}' | grep -qx "$MOUNT"; then
echo "$(date) mount OK: $MOUNT" >>"$LOG"
if command -v brew >/dev/null 2>&1 && brew services list | grep -q '^jellyfin\b'; then
echo "$(date) restarting Jellyfin (brew services)" >>"$LOG"
brew services restart jellyfin >>"$LOG" 2>>"$ERR" || true
fi
exit 0
fi
}
# Try authenticated mount if SMB_USER provided
if [ -n "$SMB_USER" ]; then
# Retrieve password from Keychain if requested and SMB_PASS not set
if [ -z "$SMB_PASS" ] && [ -n "$SMB_KEYCHAIN_ITEM" ]; then
# Try to read password from Keychain (service name = SMB_KEYCHAIN_ITEM, account = SMB_USER)
# The -w flag prints only the password
SMB_PASS="$(security find-generic-password -s "$SMB_KEYCHAIN_ITEM" -a "$SMB_USER" -w 2>/dev/null || true)"
fi
if [ -n "$SMB_PASS" ]; then
# Use password via stdin to avoid exposing it in process list
echo "$(date) attempting authenticated mount as user '$SMB_USER' -> $MOUNT" >>"$LOG"
# Do NOT include the password in the URL or logs.
MOUNT_URL="//${SMB_USER}@${HOST}/${SHARE}"
# Send password followed by newline to mount_smbfs which will read it from stdin
if printf '%s\n' "$SMB_PASS" | /sbin/mount_smbfs "$MOUNT_URL" "$MOUNT" >>"$LOG" 2>>"$ERR"; then
verify_and_exit
else
echo "$(date) authenticated mount attempt FAILED" >>"$ERR"
# Fall through to guest attempts
fi
else
# No password available for authenticated mount
echo "$(date) SMB_USER set but no SMB_PASS or Keychain entry found -> will try guest" >>"$LOG"
fi
fi
# If we reach here, try guest access (null/anonymous session)
echo "$(date) trying guest/null session (mount_smbfs -N) -> $MOUNT" >>"$LOG"
if /sbin/mount_smbfs -N "//$HOST/$SHARE" "$MOUNT" >>"$LOG" 2>>"$ERR"; then
verify_and_exit
fi
echo "$(date) trying explicit guest user (guest@$HOST) -> $MOUNT" >>"$LOG"
if /sbin/mount_smbfs "//guest@$HOST/$SHARE" "$MOUNT" >>"$LOG" 2>>"$ERR"; then
verify_and_exit
fi
# If we reached here, all attempts failed
echo "$(date) ALL SMB mount attempts FAILED" >>"$ERR"
echo "------ smbutil status ------" >>"$ERR"
smbutil statshares -a >>"$ERR" 2>&1 || true
echo "------ mount table ------" >>"$ERR"
mount >>"$ERR" 2>&1 || true
exit 1
SH
sudo chmod 755 /usr/local/sbin/mount_media_smb.sh
```
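To test the authenticated path by hand before wiring it into launchd, the variables the script reads can be supplied for a one-off run (a sketch; the user and Keychain item names are placeholders matching the Keychain example above):
```
# One-off run with credentials pulled from the Keychain item created earlier
sudo env SMB_USER=jellyfinsmb SMB_KEYCHAIN_ITEM=nas-smb /usr/local/sbin/mount_media_smb.sh
tail -n 20 /var/log/mount_media.out /var/log/mount_media.err
```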
---
### 3) Create the LaunchDaemon (mount at boot, re-check periodically, network-aware)
```
sudo tee /Library/LaunchDaemons/com.local.mountmedia.plist > /dev/null <<'PLIST'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.local.mountmedia</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/sbin/mount_media_smb.sh</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StartInterval</key>
<integer>300</integer>
<key>KeepAlive</key>
<dict>
<key>NetworkState</key>
<true/>
</dict>
<key>StandardOutPath</key>
<string>/var/log/mount_media.out</string>
<key>StandardErrorPath</key>
<string>/var/log/mount_media.err</string>
</dict>
</plist>
PLIST
sudo chown root:wheel /Library/LaunchDaemons/com.local.mountmedia.plist
sudo chmod 644 /Library/LaunchDaemons/com.local.mountmedia.plist
sudo plutil -lint /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl bootout system /Library/LaunchDaemons/com.local.mountmedia.plist 2>/dev/null || true
sudo launchctl bootstrap system /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl enable system/com.local.mountmedia
sudo launchctl kickstart -k system/com.local.mountmedia
```
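As written, the plist passes no environment to the script, so the daemon will only attempt guest access. If you want the authenticated mount at boot, launchd can hand `SMB_USER`/`SMB_KEYCHAIN_ITEM` to the script through an `EnvironmentVariables` dictionary; a hedged sketch using `plutil` (values are placeholders):
```
# Add an EnvironmentVariables dict plus the two keys the script reads, then reload the daemon
sudo plutil -insert EnvironmentVariables -dictionary /Library/LaunchDaemons/com.local.mountmedia.plist
sudo plutil -insert EnvironmentVariables.SMB_USER -string "jellyfinsmb" /Library/LaunchDaemons/com.local.mountmedia.plist
sudo plutil -insert EnvironmentVariables.SMB_KEYCHAIN_ITEM -string "nas-smb" /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl bootout system /Library/LaunchDaemons/com.local.mountmedia.plist
sudo launchctl bootstrap system /Library/LaunchDaemons/com.local.mountmedia.plist
```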
---
### 4) Run once and verify
```
# Run once now (idempotent; logs "already mounted" if present)
sudo /usr/local/sbin/mount_media_smb.sh
# LaunchDaemon status
sudo launchctl print system/com.local.mountmedia | egrep 'state|last exit|PID' || true
# Mount status (should NOT say "automounted")
mount | grep " on /Volumes/media "
# SMB mount parameters
smbutil statshares -m /Volumes/media
# Script logs
tail -n 100 /var/log/mount_media.out /var/log/mount_media.err 2>/dev/null || true
```
---
### 5) Jellyfin settings
- Temporarily disable “Enable real-time monitoring” for libraries under `/Volumes/media` until you confirm the mount stays stable.
- Then run “Scan Library Files” to repopulate anything previously removed.
---
### 6) Reboot test (recommended)
```
sudo shutdown -r now
```
After reboot:
```
mount | grep " on /Volumes/media " || echo "Not mounted yet"
sudo launchctl print system/com.local.mountmedia | egrep 'state|last exit' || true
tail -n 100 /var/log/mount_media.out /var/log/mount_media.err 2>/dev/null || true
```
---
## Rationale for Key Choices
- Persistent mount (LaunchDaemon) instead of autofs:
- autofs can unmount after inactivity; Jellyfin then removes items it thinks are gone.
- LaunchDaemon ensures the mount is present before scans and remains mounted.
- SMB mount strategy:
- Authenticated mount first when `SMB_USER` is set (password from the environment or the Keychain), then fallback to a guest/null session (`mount_smbfs -N`) and an explicit `guest@` URL.
- The password is fed to `mount_smbfs` on stdin so it never appears in the process list, the mount URL, or the logs.
- Network preflight:
- Check TCP/445 (and 139) reachability to avoid pointless mount attempts at boot or during link flaps.
- Logging:
- `/var/log/mount_media.out` and `.err` make it easy to correlate with Jellyfin logs.
---
## Troubleshooting
- “Operation not permitted” when `chown`/`chmod`:
- The path is mounted over SMB, so ownership and permissions are governed by the server. Unmount first or change ownership on the NAS.
- LaunchDaemon errors:
- Validate plist: `sudo plutil -lint /Library/LaunchDaemons/com.local.mountmedia.plist`
- Service state: `sudo launchctl print system/com.local.mountmedia`
- Mount health:
- `smbutil statshares -m /Volumes/media` should show the share as mounted and report the negotiated SMB version.
- Network/power:
- Prevent system sleep that drops the NIC; enable “Wake for network access.”
---
## Optional: If you must keep autofs
Increase the autofs timeout so it doesn't unmount on brief inactivity (less ideal than the LaunchDaemon approach):
```
sudo cp /etc/auto_master /etc/auto_master.bak.$(date +%F_%H%M%S)
sudo sed -i.bak -E 's|^/-[[:space:]]+/etc/auto_nfs$|/- -timeout=604800 /etc/auto_nfs|' /etc/auto_master
sudo automount -vc
```
---
## Reverting
To revert to autofs:
```
# Stop and remove LaunchDaemon
sudo launchctl bootout system /Library/LaunchDaemons/com.local.mountmedia.plist
sudo rm -f /Library/LaunchDaemons/com.local.mountmedia.plist
# Restore /etc/auto_master (uncomment direct map) and reload
sudo sed -i.bak 's|^#/- /etc/auto_nfs|/- /etc/auto_nfs|' /etc/auto_master
sudo automount -vc
```
---
## Notes
- Change permissions/ownership on the share from the NAS side, not the macOS client.
- `smbutil view "//$HOST"` (already logged by the script) is a quick way to confirm which shares the server exposes.
- Adjust `HOST`, `SHARE`, `MOUNT`, and `StartInterval` to suit your environment.