feat: Implement Ansible for Proxmox VM management and refactor Mastodon Docker Compose, removing outdated service definitions.

This commit is contained in:
Nikholas Pcenicni
2026-01-19 02:02:49 -05:00
parent 6154a93f1b
commit f8591ccae6
39 changed files with 437 additions and 376 deletions

50
ansible/README.md Normal file
View File

@@ -0,0 +1,50 @@
# Home Server Ansible Configuration
This directory contains Ansible playbooks for managing the Proxmox home server environment.
## Directory Structure
- `inventory/`: Contains the inventory file `hosts.ini` where you define your servers.
- `playbooks/`: Contains the actual Ansible playbooks.
- `ansible.cfg`: Local Ansible configuration.
- `requirements.yml`: List of Ansible collections required.
## Setup
1. **Install Requirements**:
```bash
ansible-galaxy install -r requirements.yml
```
2. **Configure Inventory**:
Edit `inventory/hosts.ini` and update the following:
- `ansible_host`: The IP address of your Proxmox node.
- `ansible_user`: The SSH user (usually root).
- `proxmox_api_*`: Variables if you plan to use API-based modules in the future.
*Note: Ensure you have SSH key access to your Proxmox node for passwordless login, or uncomment `ansible_ssh_pass`.*
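For example, a minimal sketch of setting up key-based access and verifying connectivity (uses the IP from the sample inventory; run from the `ansible/` directory so `ansible.cfg` picks up the inventory):
```bash
# Copy your public key to the Proxmox node for passwordless SSH
ssh-copy-id root@192.168.50.100

# Verify that Ansible can reach the node defined in inventory/hosts.ini
ansible proxmox -m ping
```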
## Available Playbooks
### Create Ubuntu Cloud Template (`playbooks/create_ubuntu_template.yml`)
This playbook downloads a generic Ubuntu 22.04 Cloud Image and converts it into a Proxmox VM Template.
**Usage:**
```bash
# Run the playbook
ansible-playbook playbooks/create_ubuntu_template.yml
```
**Variables:**
You can override variables at runtime or by editing the playbook:
- `template_id`: Default `9000`
- `template_name`: Default `ubuntu-2204-cloud`
- `storage_pool`: Default `local-lvm`
Example of overriding variables:
```bash
ansible-playbook playbooks/create_ubuntu_template.yml -e "template_id=9001 template_name=my-custom-template"
```
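### Manage Proxmox VMs (`proxmox_vm` role)
This commit also adds a `proxmox_vm` role dispatched by the `proxmox_action` variable (`create_template`, `create_vm`, `delete_vm`, `backup_vm`). For illustration only — the playbook filename and the example IDs/names below are assumptions, adjust them to your setup:
```bash
# Clone a VM from the template (new_vmid has no role default and must be supplied)
ansible-playbook playbooks/manage_vms.yml \
  -e "proxmox_action=create_vm vmid=9000 new_vmid=100 new_vm_name=web01 start_after_create=true"

# Back up an existing VM with vzdump
ansible-playbook playbooks/manage_vms.yml -e "proxmox_action=backup_vm vmid=100"
```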

5
ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,5 @@
[defaults]
inventory = inventory/hosts.ini
host_key_checking = False
retry_files_enabled = False
interpreter_python = auto_silent

View File

@@ -0,0 +1,14 @@
[proxmox]
# Replace mercury with your Proxmox node hostname or IP
mercury ansible_host=192.168.50.100 ansible_user=root
[proxmox:vars]
# If using password auth (ssh key recommended though):
# ansible_ssh_pass=yourpassword
# Connection variables for the proxmox modules (api)
proxmox_api_user=root@pam
proxmox_api_password=CHANGE_ME
proxmox_api_host=192.168.50.100
# proxmox_api_token_id=
# proxmox_api_token_secret=

View File

@@ -0,0 +1,72 @@
---
- name: Create Ubuntu Cloud-Init Template
hosts: proxmox
become: yes
vars:
template_id: 9000
template_name: ubuntu-2204-cloud
# URL for Ubuntu 22.04 Cloud Image (Jammy)
image_url: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
image_name: "ubuntu-22.04-server-cloudimg-amd64.img"
storage_pool: "local-lvm"
memory: 2048
cores: 2
tasks:
- name: Check if template already exists
command: "qm status {{ template_id }}"
register: vm_status
failed_when: false
changed_when: false
- name: Fail if template ID exists
fail:
msg: "VM ID {{ template_id }} already exists. Please choose a different ID or delete the existing VM."
when: vm_status.rc == 0
- name: Download Ubuntu Cloud Image
get_url:
url: "{{ image_url }}"
dest: "/tmp/{{ image_name }}"
mode: '0644'
- name: Install libguestfs-tools (optional; only needed if you plan to use virt-customize)
apt:
name: libguestfs-tools
state: present
ignore_errors: yes
- name: Create VM with hardware config
command: >
qm create {{ template_id }}
--name "{{ template_name }}"
--memory {{ memory }}
--cores {{ cores }}
--net0 virtio,bridge=vmbr0
--scsihw virtio-scsi-pci
--ostype l26
--serial0 socket --vga serial0
- name: Import Disk
command: "qm importdisk {{ template_id }} /tmp/{{ image_name }} {{ storage_pool }}"
- name: Attach Disk to SCSI
command: "qm set {{ template_id }} --scsi0 {{ storage_pool }}:vm-{{ template_id }}-disk-0"
- name: Add Cloud-Init Drive
command: "qm set {{ template_id }} --ide2 {{ storage_pool }}:cloudinit"
- name: Set Boot Order
command: "qm set {{ template_id }} --boot c --bootdisk scsi0"
- name: Resize Disk (Optional, e.g. 10G)
command: "qm resize {{ template_id }} scsi0 10G"
ignore_errors: yes
- name: Convert to Template
command: "qm template {{ template_id }}"
- name: Remove Downloaded Image
file:
path: "/tmp/{{ image_name }}"
state: absent

View File

@@ -0,0 +1,6 @@
---
- name: Manage Proxmox VMs
hosts: "{{ target_host | default('proxmox') }}"
become: yes
roles:
- proxmox_vm

2
ansible/requirements.yml Normal file
View File

@@ -0,0 +1,2 @@
collections:
- name: community.general

View File

@@ -0,0 +1,26 @@
---
# Defaults for proxmox_vm role
# Action to perform: create_template, create_vm, delete_vm, backup_vm
proxmox_action: create_vm
# Common settings
storage_pool: local-lvm
vmid: 9000
# Template Creation settings
template_name: ubuntu-cloud-template
image_url: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
image_name: "ubuntu-22.04-server-cloudimg-amd64.img"
memory: 2048
cores: 2
# Create VM settings (cloning)
new_vm_name: new-vm
target_node: "{{ inventory_hostname }}" # For cloning, usually same node
clone_full: true # Full clone (independent) vs Linked clone
# Backup settings
backup_mode: snapshot # snapshot, suspend, stop
backup_compress: zstd
backup_storage: local
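# Any of these defaults can be overridden at runtime with `-e` or via inventory
# group_vars; action-specific variables not listed here (e.g. new_vmid for
# create_vm) must be supplied the same way.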

View File

@@ -0,0 +1,7 @@
---
- name: Create VM Backup
command: >
vzdump {{ vmid }}
--mode {{ backup_mode }}
--compress {{ backup_compress }}
--storage {{ backup_storage }}

View File

@@ -0,0 +1,58 @@
---
- name: Check if template already exists
command: "qm status {{ vmid }}"
register: vm_status
failed_when: false
changed_when: false
- name: Fail if template ID exists
fail:
msg: "VM ID {{ vmid }} already exists. Please choose a different ID or delete the existing VM."
when: vm_status.rc == 0
- name: Download Cloud Image
get_url:
url: "{{ image_url }}"
dest: "/tmp/{{ image_name }}"
mode: '0644'
- name: Install libguestfs-tools
apt:
name: libguestfs-tools
state: present
ignore_errors: yes
- name: Create VM with hardware config
command: >
qm create {{ vmid }}
--name "{{ template_name }}"
--memory {{ memory }}
--cores {{ cores }}
--net0 virtio,bridge=vmbr0
--scsihw virtio-scsi-pci
--ostype l26
--serial0 socket --vga serial0
- name: Import Disk
command: "qm importdisk {{ vmid }} /tmp/{{ image_name }} {{ storage_pool }}"
- name: Attach Disk to SCSI
command: "qm set {{ vmid }} --scsi0 {{ storage_pool }}:vm-{{ vmid }}-disk-0"
- name: Add Cloud-Init Drive
command: "qm set {{ vmid }} --ide2 {{ storage_pool }}:cloudinit"
- name: Set Boot Order
command: "qm set {{ vmid }} --boot c --bootdisk scsi0"
- name: Resize Disk (Default 10G)
command: "qm resize {{ vmid }} scsi0 10G"
ignore_errors: yes
- name: Convert to Template
command: "qm template {{ vmid }}"
- name: Remove Downloaded Image
file:
path: "/tmp/{{ image_name }}"
state: absent

View File

@@ -0,0 +1,11 @@
---
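# Requires new_vmid to be supplied at runtime (it has no role default), e.g. -e "new_vmid=100".
# new_vm_name falls back to the role default; start_after_create defaults to false.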
- name: Clone VM from Template
command: >
qm clone {{ vmid }} {{ new_vmid }}
--name "{{ new_vm_name }}"
--full {{ 1 if clone_full | bool else 0 }}
register: clone_result
- name: Start VM (Optional)
command: "qm start {{ new_vmid }}"
when: start_after_create | default(false) | bool

View File

@@ -0,0 +1,7 @@
---
- name: Stop VM (Force Stop)
command: "qm stop {{ vmid }}"
ignore_errors: yes
- name: Destroy VM
command: "qm destroy {{ vmid }} --purge"

View File

@@ -0,0 +1,3 @@
---
- name: Dispatch task based on action
include_tasks: "{{ proxmox_action }}.yml"
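# proxmox_action maps directly to a task file in this role, e.g.
# create_template.yml, create_vm.yml, delete_vm.yml or backup_vm.yml.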

View File

@@ -1,104 +0,0 @@
---
services:
# The container for BookStack itself
bookstack:
# You should update the version here to match the latest
# release of BookStack: https://github.com/BookStackApp/BookStack/releases
# You'll change this when wanting to update the version of BookStack used.
image: lscr.io/linuxserver/bookstack:latest
container_name: bookstack
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=Etc/UTC
# APP_URL must be set as the base URL you'd expect to access BookStack
# on via the browser. The default shown here is what you might use if accessing
# direct from the browser on the docker host, hence the use of the port as configured below.
- APP_URL=${APP_URL}
# APP_KEY must be a unique key. Generate your own by running
# docker run -it --rm --entrypoint /bin/bash lscr.io/linuxserver/bookstack:latest appkey
# You should keep the "base64:" part for the option value.
- APP_KEY=${API_KEY}
# The below database details are purposefully aligned with those
# configured for the "mariadb" service below:
- DB_HOST=mariadb
- DB_PORT=3306
- DB_DATABASE=${DB_DATABASE}
- DB_USERNAME=${DB_USERNAME}
- DB_PASSWORD=${DB_PASSWORD}
# SAML
# configured for authentik
- AUTH_METHOD=${AUTH_METHOD}
- AUTH_AUTO_INITIATE=${AUTH_AUTO_INITIATE}
- SAML2_NAME=${SAML2_NAME}
- SAML2_EMAIL_ATTRIBUTE=${SAML2_EMAIL_ATTRIBUTE}
- SAML2_EXTERNAL_ID_ATTRIBUTE=${SAML2_EXTERNAL_ID_ATTRIBUTE}
- SAML2_USER_TO_GROUPS=${SAML2_USER_TO_GROUPS}
- SAML2_GROUP_ATTRIBUTE=${SAML2_GROUP_ATTRIBUTE}
- SAML2_DISPLAY_NAME_ATTRIBUTES=${SAML2_DISPLAY_NAME_ATTRIBUTES}
- SAML2_IDP_ENTITYID=${SAML2_IDP_ENTITYID}
- SAML2_AUTOLOAD_METADATA=${SAML2_AUTOLOAD_METADATA}
- SAML2_USER_TO_GROUPS=true
- SAML2_GROUP_ATTRIBUTE=groups
- SAML2_REMOVE_FROM_GROUPS=false
# SMTP
- MAIL_DRIVER=${MAIL_DRIVER}
- MAIL_HOST=${MAIL_HOST}
- MAIL_PORT=${MAIL_PORT}
- MAIL_ENCRYPTION=${MAIL_ENCRYPTION}
- MAIL_USERNAME=${MAIL_USERNAME}
- MAIL_PASSWORD=${MAIL_PASSWORD}
- MAIL_FROM=${MAIL_FROM}
- MAIL_FROM_NAME=${MAIL_FROM_NAME}
volumes:
# You generally only ever need to map this one volume.
# This maps it to a "bookstack_app_data" folder in the same
# directory as this compose config file.
- bookstack_app:/config
ports:
# This exposes port 6875 for general web access.
# Commonly you'd have a reverse proxy in front of this,
# redirecting incoming requests to this port.
- 6875:80
restart: unless-stopped
# The container for the database which BookStack will use to store
# most of its core data/content.
mariadb:
# You should update the version here to match the latest
# main version of the linuxserver mariadb container version:
# https://github.com/linuxserver/docker-mariadb/pkgs/container/mariadb/versions?filters%5Bversion_type%5D=tagged
image: lscr.io/linuxserver/mariadb:latest
container_name: bookstack-mariadb
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=Etc/UTC
# You may want to change the credentials used below,
# but be aware the latter three options need to align
# with the DB_* options for the BookStack container.
- MYSQL_ROOT_PASSWORD=${DB_ROOTPASS}
- MYSQL_DATABASE=${DB_DATABASE}
- MYSQL_USER=${DB_USERNAME}
- MYSQL_PASSWORD=${DB_PASSWORD}
volumes:
# You generally only ever need to map this one volume.
# This maps it to a "bookstack_db_data" folder in the same
# directory as this compose config file.
- bookstack_db:/config
# These ports are commented out as you don't really need this port
# exposed for normal use, mainly only if connecting directly to the
# database externally. Otherwise, this risks exposing access to the
# database when not needed.
# ports:
# - 3306:3306
restart: unless-stopped
volumes:
bookstack_app:
bookstack_db:

View File

@@ -1,46 +0,0 @@
# User and Group IDs
PUID=1000
PGID=100
# Timezone
TZ=Etc/UTC
# Application URL - set to the base URL where BookStack will be accessed
APP_URL=http://localhost:6875
# Application Key - Generate using:
# docker run -it --rm --entrypoint /bin/bash lscr.io/linuxserver/bookstack:latest appkey
API_KEY=base64:your_generated_app_key_here
# Database Configuration
DB_DATABASE=bookstack
DB_USERNAME=bookstack_user
DB_PASSWORD=your_secure_db_password
DB_ROOTPASS=your_secure_root_password
# Authentication Method (optional, defaults to standard)
# Options: standard, saml2
AUTH_METHOD=saml2
AUTH_AUTO_INITIATE=false
# SAML2 Configuration (for Authentik or other SAML providers)
SAML2_NAME=Authentik
SAML2_EMAIL_ATTRIBUTE=email
SAML2_EXTERNAL_ID_ATTRIBUTE=sub
SAML2_USER_TO_GROUPS=true
SAML2_GROUP_ATTRIBUTE=groups
SAML2_DISPLAY_NAME_ATTRIBUTES=displayName
SAML2_IDP_ENTITYID=https://authentik.yourdomain.com/application/saml/bookstack/sso/binding/
SAML2_AUTOLOAD_METADATA=https://authentik.yourdomain.com/application/saml/bookstack/sso/binding/
# SMTP Configuration (for email notifications)
MAIL_DRIVER=smtp
MAIL_HOST=smtp.yourdomain.com
MAIL_PORT=587
MAIL_ENCRYPTION=tls
MAIL_USERNAME=smtp_username
MAIL_PASSWORD=smtp_password
MAIL_FROM=noreply@yourdomain.com
MAIL_FROM_NAME=BookStack

View File

@@ -1,5 +0,0 @@
services:
rtraceio:
image: quay.io/rtraceio/flink
ports:
- '8080:8080'

View File

@@ -1,5 +0,0 @@
# Flink - No environment variables required
# This service uses the default configuration
# Access the web UI at http://localhost:8080

View File

@@ -1,48 +1,99 @@
-# Reference list of environment variables / placeholders that Komodo should set for the containers.
-# Fill these in your Komodo service/environment configuration (do NOT store secrets in VCS).
-# Basic site
-LOCAL_DOMAIN=masto.pcenicni.social
-LOCAL_HTTPS=true # true since Pangolin terminates TLS
-RAILS_ENV=production
-PORT=3000
-STREAMING_PORT=4000
-# Database (Postgres)
-DB_HOST=db
-DB_PORT=5432
-DB_NAME=mastodon_production
-DB_USER=mastodon
-# DB password: must be set securely in Komodo
-DB_PASSWORD=__REPLACE_DB_PASSWORD__ # used by postgres service
-DB_PASS=${DB_PASSWORD} # passed into Mastodon containers
-# Redis
-REDIS_URL=redis://redis:6379
-# If you use a Redis password, set REDIS_URL accordingly (e.g. redis://:password@redis:6379)
-# Mastodon secrets (generate securely)
-SECRET_KEY_BASE=__REPLACE_SECRET_KEY_BASE__
-OTP_SECRET=__REPLACE_OTP_SECRET__
-VAPID_PUBLIC_KEY=__REPLACE_VAPID_PUBLIC_KEY__
-VAPID_PRIVATE_KEY=__REPLACE_VAPID_PRIVATE_KEY__
-# ActiveRecord encryption keys (required for Rails/Mastodon encrypted attributes)
-# Generate strong random values (examples below generate hex strings).
-# Komodo must set these three variables. Do NOT reuse or change them after data is written without a proper key-rotation plan.
-ACTIVERECORD_ENCRYPTION_PRIMARY_KEY=__REPLACE_WITH_PRIMARY_KEY__ # 32 bytes (hex recommended)
-ACTIVERECORD_ENCRYPTION_DETERMINISTIC_KEY=__REPLACE_WITH_DETERMINISTIC_KEY__ # 32 bytes (hex recommended)
-ACTIVERECORD_ENCRYPTION_KEY_DERIVATION_SALT=__REPLACE_WITH_KEY_DERIVATION_SALT__ # 16 bytes (hex recommended)
-# Mail / SMTP (placeholders; template uses Gmail STARTTLS)
-SMTP_SERVER=smtp.gmail.com
-SMTP_PORT=587
-SMTP_LOGIN=notifications@example.com
-SMTP_PASSWORD=__REPLACE_SMTP_PASSWORD__
-SMTP_FROM_ADDRESS=notifications@example.com
-SMTP_AUTH_METHOD=plain
-SMTP_OPENSSL_VERIFY_MODE=require
-# Misc
-STREAMING_ENABLED=true
-RAILS_SERVE_STATIC_FILES=true
+# Service configuration
+# ---------------------
+LOCAL_DOMAIN=example.com
+LOCAL_HTTPS=true
+ALTERNATE_DOMAINS=localhost,127.0.0.1
+# Use 'true' since you have an external proxy (Pangolin/Nginx) handling TLS
+# This tells Mastodon to generate https:// links
+# Trusted Proxy Configuration
+# ---------------------------
+# Allow Mastodon to trust headers (X-Forwarded-For, X-Forwarded-Proto) from your reverse proxy.
+# We whitelist standard private ranges so the proxy's internal IP is trusted.
+TRUSTED_PROXY_IP=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
+# OIDC / Authentik Integration
+# ----------------------------
+OIDC_ENABLED=true
+OIDC_DISPLAY_NAME=Authentik
+OIDC_DISCOVERY=true
+# Fill these in from Authentik:
+OIDC_ISSUER=https://auth.example.com/application/o/mastodon/
+OIDC_AUTH_ENDPOINT=https://auth.example.com/application/o/authorize/
+OIDC_CLIENT_ID=<YOUR_CLIENT_ID>
+OIDC_CLIENT_SECRET=<YOUR_CLIENT_SECRET>
+OIDC_SCOPE=openid,profile,email
+OIDC_UID_FIELD=preferred_username
+OIDC_REDIRECT_URI=https://social.example.com/auth/auth/openid_connect/callback
+# Automatically verify emails from Authentik
+OIDC_SECURITY_ASSUME_EMAIL_IS_VERIFIED=true
+# To force users to log in with Authentik only:
+# OMNIAUTH_ONLY=true
+# Database configuration
+# ----------------------
+DB_HOST=db
+DB_PORT=5432
+DB_NAME=mastodon_production
+DB_USER=mastodon
+DB_PASS=<DB_PASSWORD>
+# DB_PASS is used by the Mastodon application to connect
+# Postgres container configuration (must match above)
+POSTGRES_USER=mastodon
+POSTGRES_PASSWORD=<DB_PASSWORD>
+POSTGRES_DB=mastodon_production
+# Redis configuration
+# -------------------
+REDIS_HOST=redis
+REDIS_PORT=6379
+# REDIS_PASSWORD=
+# If you set a Redis password, also update REDIS_URL below
+# Mastodon secrets
+# ----------------
+# Use `docker-compose run --rm web bundle exec rake secret` to generate new keys if needed
+# Generate new secrets for production!
+SECRET_KEY_BASE=<GENERATED_SECRET>
+OTP_SECRET=<GENERATED_SECRET>
+# VAPID keys (for push notifications)
+# Required. Generate with `docker-compose run --rm web bundle exec rake mastodon:webpush:generate_vapid_key`
+VAPID_PRIVATE_KEY=<GENERATED_VAPID_PRIVATE_KEY>
+VAPID_PUBLIC_KEY=<GENERATED_VAPID_PUBLIC_KEY>
+# ActiveRecord Encryption (Rails 7+)
+# ----------------------------------
+# Required. Do not change these once data is encrypted in the DB.
+# Generate these!
+ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=<GENERATED_KEY>
+ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=<GENERATED_KEY>
+ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=<GENERATED_SALT>
+# S3 / Object Storage (Optional)
+# ------------------------------
+# S3_ENABLED=true
+# S3_BUCKET=
+# AWS_ACCESS_KEY_ID=
+# AWS_SECRET_ACCESS_KEY=
+# S3_REGION=
+# S3_PROTOCOL=https
+# S3_HOSTNAME=
+# SMTP / Email
+# ------------
+SMTP_SERVER=smtp.gmail.com
+SMTP_PORT=587
+SMTP_LOGIN=notifications@example.com
+SMTP_PASSWORD=<SMTP_PASSWORD>
+SMTP_FROM_ADDRESS=notifications@example.com
+SMTP_AUTH_METHOD=plain
+SMTP_OPENSSL_VERIFY_MODE=require
+# SMTP_ENABLE_STARTTLS_AUTO=true
+# Application defaults
+# --------------------
+RAILS_ENV=production
+NODE_ENV=production
+RAILS_SERVE_STATIC_FILES=true

View File

@@ -1,149 +0,0 @@
# docker-compose.yml
version: "3.8"
services:
db:
image: postgres:14-alpine
restart: unless-stopped
env_file:
- .env.production
volumes:
- db-data:/var/lib/postgresql/data
redis:
image: redis:6-alpine
restart: unless-stopped
command: ["redis-server", "--appendonly", "yes"]
env_file:
- .env.production
volumes:
- redis-data:/data
init:
image: ghcr.io/mastodon/mastodon:latest
depends_on:
- db
- redis
restart: "no"
env_file:
- .env.production
volumes:
- public-system:/mastodon/public/system
- public-assets:/mastodon/public/assets
- public-packs:/mastodon/public/packs
- mastodon-log:/mastodon/log
command: >
bash -lc "
set -euo pipefail
echo '== Mastodon init job starting'
# 1) Check ActiveRecord encryption keys; if missing, run db:encryption:init to generate and print them then exit.
if [ -z \"${ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY:-}\" ] || [ -z \"${ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY:-}\" ] || [ -z \"${ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT:-}\" ]; then
echo 'ActiveRecord encryption keys are NOT set. Running bin/rails db:encryption:init to generate keys...'
bin/rails db:encryption:init || true
echo '======================================================='
echo 'The above command generated ACTIVE_RECORD encryption keys. Copy them into .env.production (use these exact names):'
echo ' ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY'
echo ' ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY'
echo ' ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT'
echo ''
echo 'After editing .env.production to include the keys, re-run this init job:'
echo ' docker-compose run --rm --no-deps init'
echo 'Exiting with code 1 so you persist the keys before continuing.'
exit 1
fi
echo 'ActiveRecord encryption keys present. Continuing initialization...'
# 2) Wait for Postgres readiness
echo 'Waiting for Postgres to be ready...'
attempt=0
until bundle exec rails db:version >/dev/null 2>&1; do
attempt=$((attempt+1))
if [ \"$attempt\" -gt 60 ]; then
echo 'Timed out waiting for Postgres (60 attempts). Check DB connectivity and env vars.' >&2
exit 2
fi
echo \"Postgres not ready yet (attempt $attempt). Sleeping 2s...\"
sleep 2
done
echo 'Postgres is ready.'
# 3) Prepare DB (create/migrate)
echo 'Running rails db:prepare (create DB / migrate if needed)...'
bundle exec rails db:prepare
# 4) Generate VAPID keys if missing (prints keys)
if [ -z \"${VAPID_PUBLIC_KEY:-}\" ] || [ -z \"${VAPID_PRIVATE_KEY:-}\" ]; then
echo 'VAPID keys (VAPID_PUBLIC_KEY/VAPID_PRIVATE_KEY) are missing. Generating...'
bundle exec rake mastodon:webpush:generate_vapid_key || true
echo '======================================================='
echo 'If VAPID keys were printed above, copy them into .env.production as VAPID_PUBLIC_KEY and VAPID_PRIVATE_KEY and re-run init.'
else
echo 'VAPID keys present.'
fi
# 5) Install JS deps and precompile assets
echo 'Installing javascript dependencies (yarn)...'
if command -v yarn >/dev/null 2>&1; then
yarn install --check-files --production=false
else
echo 'yarn not found in image; skipping yarn install (ensure assets are built if image doesn't include yarn).'
fi
echo 'Precompiling rails assets...'
RAILS_ENV=production bundle exec rails assets:precompile
echo 'Init job complete. You can now start web/sidekiq/streaming services.'
"
web:
image: ghcr.io/mastodon/mastodon:latest
depends_on:
- db
- redis
restart: unless-stopped
env_file:
- .env.production
volumes:
- public-system:/mastodon/public/system
- public-assets:/mastodon/public/assets
- public-packs:/mastodon/public/packs
- mastodon-log:/mastodon/log
ports:
- "3000:3000"
command: bash -lc "RAILS_ENV=production bundle exec puma -C config/puma.rb"
sidekiq:
image: ghcr.io/mastodon/mastodon:latest
depends_on:
- db
- redis
restart: unless-stopped
env_file:
- .env.production
volumes:
- public-system:/mastodon/public/system
- mastodon-log:/mastodon/log
command: bash -lc "RAILS_ENV=production bundle exec sidekiq"
streaming:
image: ghcr.io/mastodon/mastodon:latest
depends_on:
- redis
restart: unless-stopped
env_file:
- .env.production
volumes:
- mastodon-log:/mastodon/log
ports:
- "4000:4000"
command: bash -lc "NODE_ENV=production ./bin/streaming"
volumes:
db-data:
redis-data:
public-system:
public-assets:
public-packs:
mastodon-log:

View File

@@ -0,0 +1,94 @@
version: '3.8'
services:
db:
image: postgres:14-alpine
restart: unless-stopped
shm_size: 256mb
networks:
- internal_network
healthcheck:
test: ["CMD", "pg_isready", "-U", "mastodon", "-d", "mastodon_production"]
volumes:
- postgres_data:/var/lib/postgresql/data
env_file:
- .env
redis:
image: redis:7-alpine
restart: unless-stopped
networks:
- internal_network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
volumes:
- redis_data:/data
env_file:
- .env
web:
image: ghcr.io/mastodon/mastodon:latest
restart: unless-stopped
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
networks:
- internal_network
- external_network
env_file:
- .env
volumes:
- mastodon_system:/mastodon/public/system
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3000/health"]
ports:
- "3000:3000"
extra_hosts:
- "auth.pcenicni.dev:192.168.50.160"
command: bash -lc "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails db:prepare; bundle exec puma -C config/puma.rb"
sidekiq:
image: ghcr.io/mastodon/mastodon:latest
restart: unless-stopped
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
networks:
- internal_network
- external_network
env_file:
- .env
volumes:
- mastodon_system:/mastodon/public/system
command: bash -lc "bundle exec sidekiq"
streaming:
image: ghcr.io/mastodon/mastodon-streaming:latest
restart: unless-stopped
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
networks:
- internal_network
- external_network
env_file:
- .env
ports:
- "4000:4000"
command: node ./streaming
networks:
internal_network:
internal: true
external_network:
volumes:
postgres_data:
redis_data:
mastodon_system:
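# First-run sketch (illustrative commands; adjust to your environment):
#   docker compose up -d db redis                               # start the databases first
#   docker compose run --rm web bundle exec rails db:prepare    # create/migrate the database (also run by web on start)
#   docker compose up -d                                        # bring up web, sidekiq and streaming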

View File

@@ -1,36 +0,0 @@
# Run locally (on your machine or a secure host) to create the secrets you must paste into Komodo.
# This prints secure values for DB, Rails secrets, VAPID guidance and ActiveRecord encryption keys.
set -e
echo "Generating secrets (openssl)..."
DB_PASSWORD=$(openssl rand -hex 16)
SECRET_KEY_BASE=$(openssl rand -hex 64)
OTP_SECRET=$(openssl rand -hex 64)
# ActiveRecord encryption keys:
# - primary and deterministic keys: 32 bytes (hex) recommended
# - salt: 16 bytes (hex) recommended
ACTIVERECORD_ENCRYPTION_PRIMARY_KEY=$(openssl rand -hex 32)
ACTIVERECORD_ENCRYPTION_DETERMINISTIC_KEY=$(openssl rand -hex 32)
ACTIVERECORD_ENCRYPTION_KEY_DERIVATION_SALT=$(openssl rand -hex 16)
echo ""
echo "Copy these values into your Komodo environment configuration for the Mastodon services:"
echo ""
echo "DB_PASSWORD=${DB_PASSWORD}"
echo "SECRET_KEY_BASE=${SECRET_KEY_BASE}"
echo "OTP_SECRET=${OTP_SECRET}"
echo ""
echo "ACTIVERECORD_ENCRYPTION_PRIMARY_KEY=${ACTIVERECORD_ENCRYPTION_PRIMARY_KEY}"
echo "ACTIVERECORD_ENCRYPTION_DETERMINISTIC_KEY=${ACTIVERECORD_ENCRYPTION_DETERMINISTIC_KEY}"
echo "ACTIVERECORD_ENCRYPTION_KEY_DERIVATION_SALT=${ACTIVERECORD_ENCRYPTION_KEY_DERIVATION_SALT}"
echo ""
echo "Next: pull/build images on the host where you run docker-compose, then run the VAPID-generation rake task to get VAPID keys:"
echo ""
echo " docker-compose pull"
echo " docker-compose run --rm web bash -lc \"RAILS_ENV=production bundle exec rake mastodon:webpush:generate_vapid_key\""
echo ""
echo "After running that rake task, copy the printed VAPID_PUBLIC_KEY and VAPID_PRIVATE_KEY into Komodo as environment variables."
echo ""
echo "Also set SMTP_PASSWORD and any other SMTP fields in Komodo."