Compare commits

..

2 commits
master ... mail

Author SHA1 Message Date
wizzdom
fb0ec615c2
add mailserver env configs 2024-10-14 20:35:41 +01:00
wizzdom
f21931eb32
add initial mail config files 2024-10-14 20:21:58 +01:00
38 changed files with 1301 additions and 1632 deletions

View file

@@ -1,11 +1,5 @@
client {
enabled = true
# for minecraft modpack zip bombing allowance
artifact {
decompression_size_limit = "0"
decompression_file_count_limit = 12000
}
bridge_network_hairpin_mode = true
}
plugin "raw_exec" {
@@ -21,4 +15,4 @@ plugin "docker" {
enabled = true
}
}
}
}

View file

@@ -1,64 +0,0 @@
job "esports-minecraft" {
datacenters = ["aperture"]
type = "service"
group "esports-mc" {
count = 1
network {
port "mc" {
to = 25565
}
port "rcon" {
to = 25575
}
}
service {
name = "esports-mc"
port = "mc"
}
service {
name = "esports-mc-rcon"
port = "rcon"
}
task "esports-minecraft" {
driver = "docker"
config {
image = "itzg/minecraft-server"
ports = ["mc", "rcon"]
volumes = [
"/storage/nomad/${NOMAD_TASK_NAME}:/data"
]
}
resources {
cpu = 5000 # 5000 MHz
memory = 20480 # 20 GB
}
template {
data = <<EOF
EULA = "TRUE"
TYPE = "PAPER"
VERSION = "1.21.4"
ICON = "https://liquipedia.net/commons/images/thumb/5/53/DCU_Esports_allmode.png/37px-DCU_Esports_allmode.png"
USE_AIKAR_FLAGS = true
MAX_MEMORY = 18G
MOTD = "Powered by Redbrick"
MAX_PLAYERS = "32"
VIEW_DISTANCE = "32"
ENABLE_RCON = true
RCON_PASSWORD = {{ key "games/mc/esports-mc/rcon/password" }}
# Auto-download plugins
SPIGET_RESOURCES=83581,62325,118271,28140,102931 # RHLeafDecay, GSit, GravesX, Luckperms, NoChatReport
MODRINTH_PROJECTS=datapack:no-enderman-grief,thizzyz-tree-feller,imageframe,bmarker,datapack:players-drop-heads,viaversion,viabackwards
EOF
destination = "local/.env"
env = true
}
}
}
}

View file

@@ -12,9 +12,6 @@ job "minecraft-vanilla" {
port "rcon" {
to = 25575
}
port "bluemap" {
to = 8100
}
}
service {
@@ -27,48 +24,32 @@ job "minecraft-vanilla" {
port = "rcon"
}
service {
name = "vanilla-mc-bluemap"
port = "bluemap"
tags = [
"traefik.enable=true",
"traefik.http.routers.vanilla-mc-bluemap.rule=Host(`vanilla-mc.rb.dcu.ie`)",
"traefik.http.routers.vanilla-mc-bluemap.entrypoints=web,websecure",
"traefik.http.routers.vanilla-mc-bluemap.tls.certresolver=lets-encrypt",
]
}
task "minecraft-vanilla" {
driver = "docker"
config {
image = "itzg/minecraft-server"
ports = ["mc", "rcon", "bluemap"]
ports = ["mc", "rcon"]
volumes = [
"/storage/nomad/${NOMAD_TASK_NAME}:/data"
]
}
resources {
cpu = 5000 # 5000 MHz
memory = 20480 # 20 GB
cpu = 3000 # 3000 MHz
memory = 8192 # 8GB
}
template {
data = <<EOF
EULA = "TRUE"
TYPE = "PAPER"
VERSION = "1.21.3"
VERSION = "1.21.1"
ICON = "https://docs.redbrick.dcu.ie/assets/logo.png"
USE_AIKAR_FLAGS = true
MAX_MEMORY = 18G
MOTD = "LONG LIVE THE REDBRICK"
MAX_PLAYERS = "32"
VIEW_DISTANCE = "32"
MAX_PLAYERS = "20"
ENABLE_RCON = true
RCON_PASSWORD = {{ key "games/mc/vanilla-mc/rcon/password" }}
# Auto-download plugins
SPIGET_RESOURCES=83581,62325,118271,28140,102931 # RHLeafDecay, GSit, GravesX, Luckperms, NoChatReport
MODRINTH_PROJECTS=datapack:no-enderman-grief,thizzyz-tree-feller,imageframe,bluemap,bmarker,datapack:players-drop-heads,viaversion,viabackwards
EOF
destination = "local/.env"
env = true

View file

@@ -14,9 +14,6 @@ job "traefik" {
port "admin" {
static = 8080
}
port "ssh" {
static = 22
}
port "smtp" {
static = 25
}
@@ -41,12 +38,6 @@ job "traefik" {
port "managesieve" {
static = 4190
}
port "voice-tcp" {
static = 4502
}
port "voice-udp" {
static = 4503
}
}
service {
@@ -64,7 +55,6 @@ job "traefik" {
volumes = [
"local/traefik.toml:/etc/traefik/traefik.toml",
"/storage/nomad/traefik/acme/acme.json:/acme.json",
"/storage/nomad/traefik/access.log:/access.log",
]
}
@@ -83,9 +73,6 @@ job "traefik" {
[entryPoints.traefik]
address = ":8080"
[entryPoints.ssh]
address = ":22"
[entryPoints.smtp]
address = ":25"
@@ -110,14 +97,6 @@ job "traefik" {
[entryPoints.managesieve]
address = ":4190"
[entryPoints.voice-tcp]
address = ":4502"
[entryPoints.voice-udp]
address = ":4503/udp"
[entryPoints.voice-udp.udp]
timeout = "15s" # this will help reduce random dropouts in audio https://github.com/mumble-voip/mumble/issues/3550#issuecomment-441495977
[tls.options]
[tls.options.default]
minVersion = "VersionTLS12"
@@ -143,10 +122,6 @@ job "traefik" {
address = "127.0.0.1:8500"
scheme = "http"
# Enable the file provider for dynamic configuration.
[providers.file]
filename = "/local/dynamic.toml"
#[providers.nomad]
# [providers.nomad.endpoint]
# address = "127.0.0.1:4646"
@@ -156,50 +131,9 @@ job "traefik" {
email = "elected-admins@redbrick.dcu.ie"
storage = "acme.json"
[certificatesResolvers.lets-encrypt.acme.tlsChallenge]
[tracing]
[accessLog]
filePath = "/access.log"
EOF
destination = "/local/traefik.toml"
}
template {
data = <<EOF
[http]
[http.middlewares]
# handle redirects for short links
# NOTE: this is a consul template, add entries via consul kv
# create the middlewares with replacements for each redirect
{{ range $pair := tree "redirect/redbrick" }}
[http.middlewares.redirect-{{ trimPrefix "redirect/redbrick/" $pair.Key }}.redirectRegex]
regex = ".*" # match everything - hosts are handled by the router
replacement = "{{ $pair.Value }}"
permanent = true
{{- end }}
[http.routers]
# create routers with middlewares for each redirect
{{ range $pair := tree "redirect/redbrick" }}
[http.routers.{{ trimPrefix "redirect/redbrick/" $pair.Key }}-redirect]
rule = "Host(`{{ trimPrefix "redirect/redbrick/" $pair.Key }}.redbrick.dcu.ie`)"
entryPoints = ["web", "websecure"]
middlewares = ["redirect-{{ trimPrefix "redirect/redbrick/" $pair.Key }}"]
service = "dummy-service" # all routers need a service, this isn't used
[http.routers.{{ trimPrefix "redirect/redbrick/" $pair.Key }}-redirect.tls]
{{- end }}
[http.services]
[http.services.dummy-service.loadBalancer]
[[http.services.dummy-service.loadBalancer.servers]]
url = "http://127.0.0.1" # Dummy service - not used
EOF
destination = "local/dynamic.toml"
change_mode = "noop"
}
}
}
}

View file

@@ -1,44 +0,0 @@
job "uptime-kuma" {
datacenters = ["aperture"]
type = "service"
group "web" {
count = 1
network {
port "http" {
to = 3001
}
}
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.uptime-kuma.rule=Host(`status.redbrick.dcu.ie`)",
"traefik.http.routers.uptime-kuma.entrypoints=web,websecure",
"traefik.http.routers.uptime-kuma.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "louislam/uptime-kuma:1"
ports = ["http"]
volumes = [
"/storage/nomad/uptime-kuma/data:/app/data"
]
}
}
}
}

View file

@@ -27,18 +27,12 @@ job "atlas" {
tags = [
"traefik.enable=true",
"traefik.http.routers.nginx-atlas.rule=Host(`redbrick.dcu.ie`) || Host(`www.redbrick.dcu.ie`) || Host(`www.rb.dcu.ie`) || Host(`rb.dcu.ie`)",
"traefik.http.routers.nginx-atlas.rule=Host(`redbrick.dcu.ie`) || Host(`rb.dcu.ie`)",
"traefik.http.routers.nginx-atlas.entrypoints=web,websecure",
"traefik.http.routers.nginx-atlas.tls.certresolver=lets-encrypt",
"traefik.http.routers.nginx-atlas.middlewares=atlas-www-redirect,redirect-user-web",
# redirect redbrick.dcu.ie/~user to user.redbrick.dcu.ie
"traefik.http.routers.nginx-atlas.middlewares=redirect-user-web",
"traefik.http.middlewares.redirect-user-web.redirectregex.regex=https://redbrick\\.dcu\\.ie/~([^/]*)/?([^/].*)?",
"traefik.http.middlewares.redirect-user-web.redirectregex.replacement=https://$1.redbrick.dcu.ie/$2",
"traefik.http.middlewares.redirect-user-web.redirectregex.permanent=true",
# redirect www.redbrick.dcu.ie to redbrick.dcu.ie
"traefik.http.middlewares.atlas-www-redirect.redirectregex.regex=^https?://www.redbrick.dcu.ie/(.*)",
"traefik.http.middlewares.atlas-www-redirect.redirectregex.replacement=https://redbrick.dcu.ie/$${1}",
"traefik.http.middlewares.atlas-www-redirect.redirectregex.permanent=true",
]
}

View file

@@ -1,59 +0,0 @@
job "github-actions-runner" {
datacenters = ["aperture"]
type = "service"
meta {
version = "2.320.0"
sha256 = "93ac1b7ce743ee85b5d386f5c1787385ef07b3d7c728ff66ce0d3813d5f46900"
}
group "github-actions" {
count = 3
spread {
attribute = "${node.unique.id}"
weight = 100
}
task "actions-runner" {
driver = "raw_exec"
# user = "nomad"
config {
command = "/bin/bash"
args = ["${NOMAD_TASK_DIR}/bootstrap.sh"]
}
template {
data = <<EOF
#!/bin/bash
export RUNNER_ALLOW_RUNASROOT=1
echo "Querying API for registration token..."
reg_token=$(curl -L \
-X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer {{ key "github/actions-runner/token" }}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
https://api.github.com/orgs/redbrick/actions/runners/registration-token | jq -r '.token')
echo "Configuring runner..."
bash -c "${NOMAD_TASK_DIR}/config.sh --unattended --url https://github.com/redbrick --token ${reg_token} --name $(hostname) --labels aperture,deployment-runner --replace"
echo "Running actions runner..."
bash "${NOMAD_TASK_DIR}/run.sh"
EOF
destination = "local/bootstrap.sh"
}
artifact {
source = "https://github.com/actions/runner/releases/download/v2.320.0/actions-runner-linux-x64-2.320.0.tar.gz"
options {
checksum = "sha256:93ac1b7ce743ee85b5d386f5c1787385ef07b3d7c728ff66ce0d3813d5f46900"
}
}
}
}
}

View file

@@ -20,9 +20,9 @@ job "hedgedoc-backup" {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/hedgedoc/postgresql-hedgedoc-$(date +%Y-%m-%d_%H-%M-%S).sql
file=/storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/hedgedoc
mkdir -p /storage/backups/nomad/postgres/hedgedoc
alloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d " " -f 1)
@@ -30,7 +30,7 @@ job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task hedgedoc-db $alloc_id pg_dumpall -U {{ key "hedgedoc/db/user" }} > "${file}"
find /storage/backups/nomad/hedgedoc/postgresql-hedgedoc* -ctime +3 -exec rm {} \; || true
find /storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"

View file

@@ -21,7 +21,7 @@ job "hedgedoc" {
check {
type = "http"
path = "/_health"
path = "/"
interval = "10s"
timeout = "2s"
}
@@ -41,54 +41,42 @@ job "hedgedoc" {
task "app" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
config {
image = "quay.io/hedgedoc/hedgedoc:1.10.2"
image = "quay.io/hedgedoc/hedgedoc:1.6.0"
ports = ["http"]
volumes = [
"/storage/nomad/hedgedoc/banner:/hedgedoc/public/banner",
]
}
template {
data = <<EOH
CMD_DB_URL = "postgres://{{ key "hedgedoc/db/user" }}:{{ key "hedgedoc/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "hedgedoc/db/name" }}"
CMD_ALLOW_FREEURL = "false"
CMD_FORBIDDEN_NOTE_IDS = ['robots.txt', 'favicon.ico', 'api', 'build', 'css', 'docs', 'fonts', 'js', 'uploads', 'vendor', 'views', 'auth']
CMD_DOMAIN = "md.redbrick.dcu.ie"
CMD_ALLOW_ORIGIN = ["redbrick.dcu.ie", "rb.dcu.ie"]
CMD_USE_CDN = "true"
CMD_PROTOCOL_USESSL = "true"
CMD_URL_ADDPORT = "false"
CMD_LOG_LEVEL = "debug"
CMD_ENABLE_STATS_API = "true"
# Accounts
CMD_ALLOW_EMAIL_REGISTER = "false"
CMD_ALLOW_ANONYMOUS = "false"
CMD_ALLOW_ANONYMOUS_EDITS = "false"
CMD_EMAIL = "false"
CMD_LDAP_URL = "{{ key "hedgedoc/ldap/url" }}"
CMD_LDAP_SEARCHBASE = "ou=accounts,o=redbrick"
CMD_LDAP_SEARCHFILTER = "{{`(uid={{username}})`}}"
CMD_LDAP_PROVIDERNAME = "Redbrick"
CMD_LDAP_USERIDFIELD = "uidNumber"
CMD_LDAP_USERNAMEFIELD = "uid"
CMD_SESSION_SECRET = "{{ key "hedgedoc/session/secret" }}"
CMD_DEFAULT_PERMISSION = "limited"
# Security/Privacy
CMD_HSTS_PRELOAD = "true"
CMD_CSP_ENABLE = "true"
CMD_HSTS_INCLUDE_SUBDOMAINS = "true"
CMD_CSP_ADD_DISQUS = "false"
CMD_CSP_ADD_GOOGLE_ANALYTICS= "false"
CMD_CSP_ALLOW_PDF_EMBED = "true"
CMD_ALLOW_GRAVATAR = "true"
# Uploads
CMD_IMAGE_UPLOAD_TYPE = "imgur"
CMD_IMGUR_CLIENTID = "{{ key "hedgedoc/imgur/clientid" }}"
CMD_IMGUR_CLIENTSECRET = "{{ key "hedgedoc/imgur/clientsecret" }}"
CMD_IMAGE_UPLOAD_TYPE = "imgur"
CMD_IMGUR_CLIENTID = "{{ key "hedgedoc/imgur/clientid" }}"
CMD_IMGUR_CLIENTSECRET = "{{ key "hedgedoc/imgur/clientsecret" }}"
CMD_DB_URL = "postgres://{{ key "hedgedoc/db/user" }}:{{ key "hedgedoc/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "hedgedoc/db/name" }}"
CMD_ALLOW_FREEURL = "false"
CMD_DEFAULT_PERMISSION = "private"
CMD_DOMAIN = "md.redbrick.dcu.ie"
CMD_ALLOW_ORIGIN = ["md.redbrick.dcu.ie", "md.rb.dcu.ie"]
CMD_HSTS_PRELOAD = "true"
CMD_USE_CDN = "true"
CMD_PROTOCOL_USESSL = "true"
CMD_URL_ADDPORT = "false"
CMD_ALLOW_EMAIL_REGISTER = "false"
CMD_ALLOW_ANONYMOUS = "false"
CMD_EMAIL = "false"
CMD_LDAP_URL = "{{ key "hedgedoc/ldap/url" }}"
CMD_LDAP_SEARCHBASE = "ou=accounts,o=redbrick"
CMD_LDAP_SEARCHFILTER = "{{`(uid={{username}})`}}"
CMD_LDAP_PROVIDERNAME = "Redbrick"
CMD_LDAP_USERIDFIELD = "uidNumber"
CMD_LDAP_USERNAMEFIELD = "uid"
CMD_ALLOW_GRAVATAR = "true"
CMD_SESSION_SECRET = "{{ key "hedgedoc/session/secret" }}"
CMD_LOG_LEVEL = "debug"
EOH
destination = "local/.env"
env = true
@@ -98,12 +86,17 @@ EOH
task "hedgedoc-db" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
config {
image = "postgres:13.4-alpine"
image = "postgres:9.6-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/hedgedoc:/var/lib/postgresql/data",
"/opt/postgres/hedgedoc:/var/lib/postgresql/data"
]
}

View file

@@ -0,0 +1,86 @@
job "autodiscover" {
datacenters = ["aperture"]
type = "service"
meta {
tld = "rb.dcu.ie"
mail = "mail.rb.dcu.ie"
}
group "autodiscover" {
count = 1
network {
port "http" {
to = 8000
}
}
service {
name = "autodiscover"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.autodiscover.rule=Host(`autoconfig.${NOMAD_META_tld}`) || Host(`autodiscover.${NOMAD_META_tld}`)",
"traefik.http.routers.autodiscover.entrypoints=web,websecure",
"traefik.http.routers.autodiscover.tls.certresolver=lets-encrypt",
]
}
task "autodiscover" {
driver = "docker"
config {
image = "monogramm/autodiscover-email-settings:latest"
ports = ["http"]
}
template {
data = <<EOF
COMPANY_NAME=Redbrick
SUPPORT_URL=https://autodiscover.{{ env "NOMAD_META_tld" }}
DOMAIN={{ env "NOMAD_META_tld" }}
# IMAP configuration (host mandatory to enable)
IMAP_HOST={{ env "NOMAD_META_mail" }}
IMAP_PORT=993
IMAP_SOCKET=SSL
# POP configuration (host mandatory to enable)
POP_HOST={{ env "NOMAD_META_mail" }}
POP_PORT=995
POP_SOCKET=SSL
# SMTP configuration (host mandatory to enable)
SMTP_HOST={{ env "NOMAD_META_mail" }}
SMTP_PORT=587
SMTP_SOCKET=STARTTLS
# MobileSync/ActiveSync configuration (url mandatory to enable)
# MOBILESYNC_URL=https://sync.example.com
# MOBILESYNC_NAME=sync.example.com
# LDAP configuration (host mandatory to enable)
# LDAP_HOST=ldap.example.com
# LDAP_PORT=636
# LDAP_SOCKET=SSL
# LDAP_BASE=dc=ldap,dc=example,dc=com
# LDAP_USER_FIELD=uid
# LDAP_USER_BASE=ou=People,dc=ldap,dc=example,dc=com
# LDAP_SEARCH=(|(objectClass=PostfixBookMailAccount))
# Apple mobile config identifiers (identifier mandatory to enable)
# PROFILE_IDENTIFIER=com.example.autodiscover
# PROFILE_UUID=92943D26-CAB3-4086-897D-DC6C0D8B1E86
# MAIL_UUID=7A981A9E-D5D0-4EF8-87FE-39FD6A506FAC
# LDAP_UUID=6ECB6BA9-2208-4ABF-9E60-4E9F4CD7309E
EOF
destination = "local/autodiscover.env"
env = true
}
}
}
}

View file

@@ -0,0 +1,646 @@
# -----------------------------------------------
# --- Mailserver Environment Variables ----------
# -----------------------------------------------
# DOCUMENTATION FOR THESE VARIABLES IS FOUND UNDER
# https://docker-mailserver.github.io/docker-mailserver/latest/config/environment/
# -----------------------------------------------
# --- General Section ---------------------------
# -----------------------------------------------
# empty => uses the `hostname` command to get the mail server's canonical hostname
# => Specify a fully-qualified domainname to serve mail for. This is used for many of the config features so if you can't set your hostname (e.g. you're in a container platform that doesn't let you) specify it in this environment variable.
OVERRIDE_HOSTNAME={{ env "NOMAD_META_DOMAIN" }}
# Set the log level for DMS.
# This is mostly relevant for container startup scripts and change detection event feedback.
#
# Valid values (in order of increasing verbosity) are: `error`, `warn`, `info`, `debug` and `trace`.
# The default log level is `info`.
# NOTE: Change this at some point plz
LOG_LEVEL=trace
# critical => Only show critical messages
# error => Only show erroneous output
# **warn** => Show warnings
# info => Normal informational output
# debug => Also show debug messages
# NOTE: Change this at some point plz
SUPERVISOR_LOGLEVEL=debug
# Support for deployment where these defaults are not compatible (eg: some NAS appliances):
# /var/mail vmail User ID (default: 5000)
DMS_VMAIL_UID=
# /var/mail vmail Group ID (default: 5000)
DMS_VMAIL_GID=
# **empty** => use FILE
# LDAP => use LDAP authentication
# OIDC => use OIDC authentication (not yet implemented)
# FILE => use local files (this is used as the default)
ACCOUNT_PROVISIONER=LDAP
# empty => postmaster@domain.com
# => Specify the postmaster address
POSTMASTER_ADDRESS={{ key "mail/postmaster/email" }}
# Check for updates on container start and then once a day
# If an update is available, a mail is sent to POSTMASTER_ADDRESS
# 0 => Update check disabled
# 1 => Update check enabled
ENABLE_UPDATE_CHECK=1
# Customize the update check interval.
# Number + Suffix. Suffix must be 's' for seconds, 'm' for minutes, 'h' for hours or 'd' for days.
UPDATE_CHECK_INTERVAL=1d
# Set different options for mynetworks option (can be overwritten in postfix-main.cf)
# **WARNING**: Adding the docker network's gateway to the list of trusted hosts, e.g. using the `network` or
# `connected-networks` option, can create an open relay
# https://github.com/docker-mailserver/docker-mailserver/issues/1405#issuecomment-590106498
# The same can happen for rootless podman. To prevent this, set the value to "none" or configure slirp4netns
# https://github.com/docker-mailserver/docker-mailserver/issues/2377
#
# none => Explicitly force authentication
# container => Container IP address only
# host => Add docker container network (ipv4 only)
# network => Add all docker container networks (ipv4 only)
# connected-networks => Add all connected docker networks (ipv4 only)
PERMIT_DOCKER=none
# Set the timezone. If this variable is unset, the container runtime will try to detect the time using
# `/etc/localtime`, which you can alternatively mount into the container. The value of this variable
# must follow the pattern `AREA/ZONE`, i.e. if you want to use Germany's time zone, use `Europe/Berlin`.
# You can lookup all available timezones here: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=Europe/Dublin
# In case your network interface differs from 'eth0', e.g. when you are using HostNetworking in Kubernetes,
# you can set NETWORK_INTERFACE to whatever interface you want. This interface will then be used.
# - **empty** => eth0
NETWORK_INTERFACE=
# empty => modern
# modern => Enables TLSv1.2 and modern ciphers only. (default)
# intermediate => Enables TLSv1, TLSv1.1 and TLSv1.2 and broad compatibility ciphers.
TLS_LEVEL=intermediate
# Configures the handling of creating mails with forged sender addresses.
#
# **0** => (not recommended) Mail address spoofing allowed. Any logged in user may create email messages with a forged sender address (see also https://en.wikipedia.org/wiki/Email_spoofing).
# 1 => Mail spoofing denied. Each user may only send with his own or his alias addresses. Addresses with extension delimiters(http://www.postfix.org/postconf.5.html#recipient_delimiter) are not able to send messages.
SPOOF_PROTECTION=1
# Enables the Sender Rewriting Scheme. SRS is needed if your mail server acts as forwarder. See [postsrsd](https://github.com/roehling/postsrsd/blob/master/README.md#sender-rewriting-scheme-crash-course) for further explanation.
# - **0** => Disabled
# - 1 => Enabled
ENABLE_SRS=1
# Enables the OpenDKIM service.
# **1** => Enabled
# 0 => Disabled
ENABLE_OPENDKIM=0
# Enables the OpenDMARC service.
# **1** => Enabled
# 0 => Disabled
ENABLE_OPENDMARC=0
# Enabled `policyd-spf` in Postfix's configuration. You will likely want to set this
# to `0` in case you're using Rspamd (`ENABLE_RSPAMD=1`).
#
# - 0 => Disabled
# - **1** => Enabled
ENABLE_POLICYD_SPF=0
# Enables POP3 service
# - **0** => Disabled
# - 1 => Enabled
ENABLE_POP3=0
# Enables IMAP service
# - 0 => Disabled
# - **1** => Enabled
ENABLE_IMAP=1
# Enables ClamAV, an anti-virus scanner.
# 1 => Enabled
# **0** => Disabled
ENABLE_CLAMAV=0
# Add the value of this ENV as a prefix to the mail subject when spam is detected.
# NOTE: This subject prefix may be redundant (by default spam is delivered to a junk folder).
# It provides value when your junk mail is stored alongside legitimate mail instead of a separate location (like with `SPAMASSASSIN_SPAM_TO_INBOX=1` or `MOVE_SPAM_TO_JUNK=0` or a POP3 only setup, without IMAP).
# NOTE: When not using Docker Compose, other CRI may not support quote-wrapping the value here to preserve any trailing white-space.
SPAM_SUBJECT=
# Enables Rspamd
# **0** => Disabled
# 1 => Enabled
ENABLE_RSPAMD=1
# When `ENABLE_RSPAMD=1`, an internal Redis instance is enabled implicitly.
# This setting provides an opt-out to allow using an external instance instead.
# 0 => Disabled
# 1 => Enabled
ENABLE_RSPAMD_REDIS=1
# When enabled,
#
# 1. the "[autolearning][rspamd-autolearn]" feature is turned on;
# 2. the Bayes classifier will be trained when moving mails from or to the Junk folder (with the help of Sieve scripts).
#
# **0** => disabled
# 1 => enabled
RSPAMD_LEARN=1
# This settings controls whether checks should be performed on emails coming
# from authenticated users (i.e. most likely outgoing emails). The default value
# is `0` in order to align better with SpamAssassin. We recommend reading
# through https://rspamd.com/doc/tutorials/scanning_outbound.html though to
# decide for yourself whether you need and want this feature.
#
# Note that DKIM signing of e-mails will still happen.
RSPAMD_CHECK_AUTHENTICATED=0
# Controls whether the Rspamd Greylisting module is enabled.
# This module can further assist in avoiding spam emails by greylisting
# e-mails with a certain spam score.
#
# **0** => disabled
# 1 => enabled
RSPAMD_GREYLISTING=0
# Can be used to enable or disable the Hfilter group module.
#
# - 0 => Disabled
# - **1** => Enabled
RSPAMD_HFILTER=1
# Can be used to control the score when the HFILTER_HOSTNAME_UNKNOWN symbol applies. A higher score is more punishing. Setting it to 15 is equivalent to rejecting the email when the check fails.
#
# Default: 6
RSPAMD_HFILTER_HOSTNAME_UNKNOWN_SCORE=6
# Can be used to enable or disable the (still experimental) neural module.
#
# - **0** => Disabled
# - 1 => Enabled
RSPAMD_NEURAL=0
# Amavis content filter (used for ClamAV & SpamAssassin)
# 0 => Disabled
# 1 => Enabled
ENABLE_AMAVIS=1
# -1/-2/-3 => Only show errors
# **0** => Show warnings
# 1/2 => Show default informational output
# 3/4/5 => log debug information (very verbose)
AMAVIS_LOGLEVEL=0
# This enables DNS block lists in Postscreen.
# Note: Emails will be rejected, if they don't pass the block list checks!
# **0** => DNS block lists are disabled
# 1 => DNS block lists are enabled
ENABLE_DNSBL=0
# If you enable Fail2Ban, don't forget to add the following lines to your `compose.yaml`:
# cap_add:
# - NET_ADMIN
# Otherwise, `nftables` won't be able to ban IPs.
ENABLE_FAIL2BAN=1
# Fail2Ban blocktype
# drop => drop packet (send NO reply)
# reject => reject packet (send ICMP unreachable)
FAIL2BAN_BLOCKTYPE=drop
# 1 => Enables Managesieve on port 4190
# empty => disables Managesieve
ENABLE_MANAGESIEVE=1
# **enforce** => Allow other tests to complete. Reject attempts to deliver mail with a 550 SMTP reply, and log the helo/sender/recipient information. Repeat this test the next time the client connects.
# drop => Drop the connection immediately with a 521 SMTP reply. Repeat this test the next time the client connects.
# ignore => Ignore the failure of this test. Allow other tests to complete. Repeat this test the next time the client connects. This option is useful for testing and collecting statistics without blocking mail.
# NOTE: Change this at some point plz
POSTSCREEN_ACTION=ignore
# empty => all daemons start
# 1 => only launch postfix smtp
SMTP_ONLY=
# Please read [the SSL page in the documentation](https://docker-mailserver.github.io/docker-mailserver/latest/config/security/ssl) for more information.
#
# empty => SSL disabled
# letsencrypt => Enables Let's Encrypt certificates
# custom => Enables custom certificates
# manual => Let's you manually specify locations of your SSL certificates for non-standard cases
# self-signed => Enables self-signed certificates
SSL_TYPE=letsencrypt
# Set how many days a virusmail will stay on the server before being deleted
# empty => 7 days
VIRUSMAILS_DELETE_DELAY=
# Configure Postfix `virtual_transport` to deliver mail to a different LMTP client (default is a dovecot socket).
# Provide any valid URI. Examples:
#
# empty => `lmtp:unix:/var/run/dovecot/lmtp` (default, configured in Postfix main.cf)
# `lmtp:unix:private/dovecot-lmtp` (use socket)
# `lmtps:inet:<host>:<port>` (secure lmtp with starttls)
# `lmtp:<kopano-host>:2003` (use kopano as mailstore)
POSTFIX_DAGENT=
# Set the mailbox size limit for all users. If set to zero, the size will be unlimited (default). Size is in bytes.
#
# empty => 0
# NOTE: Set this at some point plz
POSTFIX_MAILBOX_SIZE_LIMIT=
# See https://docker-mailserver.github.io/docker-mailserver/latest/config/account-management/overview/#quotas
# 0 => Dovecot quota is disabled
# 1 => Dovecot quota is enabled
ENABLE_QUOTAS=1
# Set the message size limit for all users. If set to zero, the size will be unlimited (not recommended!). Size is in bytes.
#
# empty => 10240000 (~10 MB)
POSTFIX_MESSAGE_SIZE_LIMIT=
# Mails larger than this limit won't be scanned.
# ClamAV must be enabled (ENABLE_CLAMAV=1) for this.
#
# empty => 25M (25 MB)
CLAMAV_MESSAGE_SIZE_LIMIT=
# Enables regular pflogsumm mail reports.
# This is a new option. The old REPORT options are still supported for backwards compatibility. If this is not set and reports are enabled with the old options, logrotate will be used.
#
# not set => No report
# daily_cron => Daily report for the previous day
# logrotate => Full report based on the mail log when it is rotated
# NOTE: Set this at some point plz
PFLOGSUMM_TRIGGER=
# Recipient address for pflogsumm reports.
#
# not set => Use REPORT_RECIPIENT or POSTMASTER_ADDRESS
# => Specify the recipient address(es)
PFLOGSUMM_RECIPIENT={{ key "mail/postmaster/email" }}
# Sender address (`FROM`) for pflogsumm reports if pflogsumm reports are enabled.
#
# not set => Use REPORT_SENDER
# => Specify the sender address
PFLOGSUMM_SENDER={{ key "mail/report/email" }}
# Interval for logwatch report.
#
# none => No report is generated
# daily => Send a daily report
# weekly => Send a report every week
LOGWATCH_INTERVAL=
# Recipient address for logwatch reports if they are enabled.
#
# not set => Use REPORT_RECIPIENT or POSTMASTER_ADDRESS
# => Specify the recipient address(es)
LOGWATCH_RECIPIENT={{ key "mail/postmaster/email" }}
# Sender address (`FROM`) for logwatch reports if logwatch reports are enabled.
#
# not set => Use REPORT_SENDER
# => Specify the sender address
LOGWATCH_SENDER={{ key "mail/report/email" }}
# Defines who receives reports if they are enabled.
# **empty** => ${POSTMASTER_ADDRESS}
# => Specify the recipient address
REPORT_RECIPIENT={{ key "mail/postmaster/email" }}
# Defines who sends reports if they are enabled.
# **empty** => mailserver-report@${DOMAINNAME}
# => Specify the sender address
REPORT_SENDER={{ key "mail/report/email" }}
# Changes the interval in which log files are rotated
# **weekly** => Rotate log files weekly
# daily => Rotate log files daily
# monthly => Rotate log files monthly
#
# Note: This Variable actually controls logrotate inside the container
# and rotates the log files depending on this setting. The main log output is
# still available in its entirety via `docker logs mail` (Or your
# respective container name). If you want to control logrotation for
# the Docker-generated logfile see:
# https://docs.docker.com/config/containers/logging/configure/
#
# Note: This variable can also determine the interval for Postfix's log summary reports, see [`PFLOGSUMM_TRIGGER`](#pflogsumm_trigger).
LOGROTATE_INTERVAL=weekly
# Defines how many log files are kept by logrotate
LOGROTATE_COUNT=12
# If enabled, employs `reject_unknown_client_hostname` to sender restrictions in Postfix's configuration.
#
# - **0** => Disabled
# - 1 => Enabled
POSTFIX_REJECT_UNKNOWN_CLIENT_HOSTNAME=0
# Choose TCP/IP protocols for postfix to use
# **all** => All possible protocols.
# ipv4 => Use only IPv4 traffic. Most likely you want this behind Docker.
# ipv6 => Use only IPv6 traffic.
#
# Note: More details at http://www.postfix.org/postconf.5.html#inet_protocols
POSTFIX_INET_PROTOCOLS=ipv4
# Enables MTA-STS support for outbound mail.
# More details: https://docker-mailserver.github.io/docker-mailserver/v13.3/config/best-practices/mta-sts/
# - **0** => MTA-STS disabled
# - 1 => MTA-STS enabled
ENABLE_MTA_STS=1
# Choose TCP/IP protocols for dovecot to use
# **all** => Listen on all interfaces
# ipv4 => Listen only on IPv4 interfaces. Most likely you want this behind Docker.
# ipv6 => Listen only on IPv6 interfaces.
#
# Note: More information at https://dovecot.org/doc/dovecot-example.conf
DOVECOT_INET_PROTOCOLS=ipv4
# -----------------------------------------------
# --- SpamAssassin Section ----------------------
# -----------------------------------------------
ENABLE_SPAMASSASSIN=1
# KAM is a 3rd party SpamAssassin ruleset, provided by the McGrail Foundation.
# If SpamAssassin is enabled, KAM can be used in addition to the default ruleset.
# - **0** => KAM disabled
# - 1 => KAM enabled
#
# Note: only has an effect if `ENABLE_SPAMASSASSIN=1`
ENABLE_SPAMASSASSIN_KAM=1
# deliver spam messages to the inbox (tagged using SPAM_SUBJECT)
SPAMASSASSIN_SPAM_TO_INBOX=1
# spam messages will be moved in the Junk folder (SPAMASSASSIN_SPAM_TO_INBOX=1 required)
MOVE_SPAM_TO_JUNK=1
# spam messages will be marked as read
MARK_SPAM_AS_READ=0
# add 'spam info' headers at, or above this level
SA_TAG=2.0
# add 'spam detected' headers at, or above this level
SA_TAG2=6.31
# triggers spam evasive actions
SA_KILL=10.0
# -----------------------------------------------
# --- Fetchmail Section -------------------------
# -----------------------------------------------
ENABLE_FETCHMAIL=0
# The interval to fetch mail in seconds
FETCHMAIL_POLL=300
# Use multiple fetchmail instances (1 per poll entry in fetchmail.cf)
# Supports multiple IMAP IDLE connections when a server is used across multiple poll entries
# https://otremba.net/wiki/Fetchmail_(Debian)#Immediate_Download_via_IMAP_IDLE
FETCHMAIL_PARALLEL=0
# Enable or disable `getmail`.
#
# - **0** => Disabled
# - 1 => Enabled
ENABLE_GETMAIL=0
# The number of minutes for the interval. Min: 1; Max: 30.
GETMAIL_POLL=5
# -----------------------------------------------
# --- LDAP Section ------------------------------
# -----------------------------------------------
# A second container for the ldap service is necessary (i.e. https://hub.docker.com/r/bitnami/openldap/)
# empty => no
# yes => LDAP over TLS enabled for Postfix
LDAP_START_TLS=
# empty => mail.example.com
# Specify the `<dns-name>` / `<ip-address>` where the LDAP server is reachable via a URI like: `ldaps://mail.example.com`.
# Note: You must include the desired URI scheme (`ldap://`, `ldaps://`, `ldapi://`).
LDAP_SERVER_HOST={{ key "mail/ldap/host" }}
# empty => ou=people,dc=domain,dc=com
# => e.g. LDAP_SEARCH_BASE=dc=mydomain,dc=local
LDAP_SEARCH_BASE={{ key "mail/ldap/searchbase" }}
# empty => cn=admin,dc=domain,dc=com
# => take a look at examples of SASL_LDAP_BIND_DN
LDAP_BIND_DN={{ key "mail/ldap/bind/dn" }}
# empty => admin
# => Specify the password to bind against ldap
LDAP_BIND_PW={{ key "mail/ldap/bind/password" }}
# e.g. `"(&(mail=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for users
LDAP_QUERY_FILTER_USER="(&(objectClass=posixAccount)(uid=%u))"
# e.g. `"(&(mailGroupMember=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for groups
# NOTE: Change this at some point plz
LDAP_QUERY_FILTER_GROUP=
# e.g. `"(&(mailAlias=%s)(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for aliases
# NOTE: Change this at some point plz
LDAP_QUERY_FILTER_ALIAS=
# e.g. `"(&(|(mail=*@%s)(mailalias=*@%s)(mailGroupMember=*@%s))(mailEnabled=TRUE))"`
# => Specify how ldap should be asked for domains
# NOTE: Change this at some point plz
LDAP_QUERY_FILTER_DOMAIN=
# -----------------------------------------------
# --- Dovecot Section ---------------------------
# -----------------------------------------------
# empty => no
# yes => LDAP over TLS enabled for Dovecot
DOVECOT_TLS=
# e.g. `"(&(objectClass=PostfixBookMailAccount)(uniqueIdentifier=%n))"`
DOVECOT_USER_FILTER="(&(objectclass=posixAccount)(uid=%n))"
# e.g. `"(&(objectClass=PostfixBookMailAccount)(uniqueIdentifier=%n))"`
# DOVECOT_PASS_FILTER=(&(objectclass=posixAccount)(uid=%n))
# Define the mailbox format to be used
# default is maildir, supported values are: sdbox, mdbox, maildir
DOVECOT_MAILBOX_FORMAT=mdbox
# empty => no
# yes => Allow bind authentication for LDAP
# https://wiki.dovecot.org/AuthDatabase/LDAP/AuthBinds
DOVECOT_AUTH_BIND=yes
DOVECOT_USER_ATTRS=uid=uid,homeDirectory=home
DOVECOT_PASS_FILTER="(&(objectclass=posixAccount)(uid=%n))"
DOVECOT_PASS_ATTRS=uid=uid,homeDirectory=home,userPassword=password
# -----------------------------------------------
# --- Postgrey Section --------------------------
# -----------------------------------------------
ENABLE_POSTGREY=1
# greylist for N seconds
POSTGREY_DELAY=30
# delete entries older than N days since the last time that they have been seen
POSTGREY_MAX_AGE=35
# response when a mail is greylisted
POSTGREY_TEXT="Delayed by Postgrey"
# whitelist host after N successful deliveries (N=0 to disable whitelisting)
POSTGREY_AUTO_WHITELIST_CLIENTS=5
# -----------------------------------------------
# --- SASL Section ------------------------------
# -----------------------------------------------
ENABLE_SASLAUTHD=1
# empty => pam
# `ldap` => authenticate against ldap server
# `shadow` => authenticate against local user db
# `mysql` => authenticate against mysql db
# `rimap` => authenticate against imap server
# Note: can be a list of mechanisms like pam ldap shadow
SASLAUTHD_MECHANISMS=ldap
# empty => None
# e.g. with SASLAUTHD_MECHANISMS rimap you need to specify the ip-address/servername of the imap server ==> xxx.xxx.xxx.xxx
SASLAUTHD_MECH_OPTIONS=
# Note: You must include the desired URI scheme (`ldap://`, `ldaps://`, `ldapi://`).
SASLAUTHD_LDAP_SERVER={{ key "mail/ldap/host" }}
# specify an object with privileges to search the directory tree
# e.g. active directory: SASLAUTHD_LDAP_BIND_DN=cn=Administrator,cn=Users,dc=mydomain,dc=net
# e.g. openldap: SASLAUTHD_LDAP_BIND_DN=cn=admin,dc=mydomain,dc=net
SASLAUTHD_LDAP_BIND_DN={{ key "mail/ldap/bind/dn" }}
SASLAUTHD_LDAP_PASSWORD={{ key "mail/ldap/bind/password" }}
# specify the search base
SASLAUTHD_LDAP_SEARCH_BASE={{ key "mail/ldap/searchbase" }}
# e.g. for active directory: `(&(sAMAccountName=%U)(objectClass=person))`
# e.g. for openldap: `(&(uid=%U)(objectClass=person))`
SASLAUTHD_LDAP_FILTER=(&(objectclass=posixAccount)(uid=%n))
# empty => no
# yes => LDAP over TLS enabled for SASL
# If set to yes, the protocol in SASLAUTHD_LDAP_SERVER must be ldap:// or missing.
SASLAUTHD_LDAP_START_TLS=
# empty => no
# yes => Require and verify server certificate
# If yes you must/could specify SASLAUTHD_LDAP_TLS_CACERT_FILE or SASLAUTHD_LDAP_TLS_CACERT_DIR.
SASLAUTHD_LDAP_TLS_CHECK_PEER=
# File containing CA (Certificate Authority) certificate(s).
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_tls_cacert_file` option
SASLAUTHD_LDAP_TLS_CACERT_FILE=
# Path to directory with CA (Certificate Authority) certificates.
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_tls_cacert_dir` option
SASLAUTHD_LDAP_TLS_CACERT_DIR=
# Specify what password attribute to use for password verification.
# empty => Nothing is added to the configuration but the documentation says it is `userPassword` by default.
# Any value => Fills the `ldap_password_attr` option
SASLAUTHD_LDAP_PASSWORD_ATTR=
# empty => `bind` will be used as a default value
# `fastbind` => The fastbind method is used
# `custom` => The custom method uses userPassword attribute to verify the password
SASLAUTHD_LDAP_AUTH_METHOD=bind
# Specify the authentication mechanism for SASL bind
# empty => Nothing is added to the configuration
# Any value => Fills the `ldap_mech` option
SASLAUTHD_LDAP_MECH=
# -----------------------------------------------
# --- SRS Section -------------------------------
# -----------------------------------------------
# envelope_sender => Rewrite only envelope sender address (default)
# header_sender => Rewrite only header sender (not recommended)
# envelope_sender,header_sender => Rewrite both senders
# An email has an "envelope" sender (indicating the sending server) and a
# "header" sender (indicating who sent it). More strict SPF policies may require
# you to replace both instead of just the envelope sender.
SRS_SENDER_CLASSES=envelope_sender
# empty => Envelope sender will be rewritten for all domains
# provide comma separated list of domains to exclude from rewriting
SRS_EXCLUDE_DOMAINS=
# empty => generated when the image is built
# provide a secret to use in base64
# you may specify multiple keys, comma separated. the first one is used for
# signing and the remaining will be used for verification. this is how you
# rotate and expire keys
SRS_SECRET={{ key "mail/srs/secret" }}
# -----------------------------------------------
# --- Default Relay Host Section ----------------
# -----------------------------------------------
# Setup relaying all mail through a default relay host
#
# Set a default host to relay all mail through (optionally include a port)
# Example: [mail.example.com]:587
DEFAULT_RELAY_HOST=
# -----------------------------------------------
# --- Multi-Domain Relay Section ----------------
# -----------------------------------------------
# Setup relaying for multiple domains based on the domain name of the sender
# optionally uses usernames and passwords in postfix-sasl-password.cf and relay host mappings in postfix-relaymap.cf
#
# Set a default host to relay mail through
# Example: mail.example.com
RELAY_HOST=
# empty => 25
# default port to relay mail
RELAY_PORT=25
# -----------------------------------------------
# --- Relay Host Credentials Section ------------
# -----------------------------------------------
# Configure a relay user and password to use with RELAY_HOST / DEFAULT_RELAY_HOST
# empty => no default
RELAY_USER=
# empty => no default
RELAY_PASSWORD=

View file

@ -0,0 +1,236 @@
# docker-mailserver deployment fronted by Traefik.
# STARTTLS protocols (smtp/submission/imap/pop3/managesieve) are routed as
# plain TCP with PROXY protocol v2 towards the container, while the
# implicit-TLS protocols (submissions/imaps/pop3s) use TLS passthrough so
# the mailserver terminates TLS with its own certificate.
job "mailserver" {
datacenters = ["aperture"]
type = "service"
# Referenced below as ${NOMAD_META_domain} / ${NOMAD_META_tld}.
meta {
tld = "rb.dcu.ie"
domain = "mail.rb.dcu.ie"
}
group "mail" {
network {
# mode = "bridge"
# One mapped port per protocol exposed by the container.
port "http" {
to = 80
}
port "smtp" {
to = 25
}
port "submissions" {
to = 465
}
port "submission" {
to = 587
}
port "imap" {
to = 143
}
port "imaps" {
to = 993
}
port "pop3" {
to = 110
}
port "pop3s" {
to = 995
}
port "managesieve" {
to = 4190
}
}
# Minimal HTTP responder on the mail host; gives the HTTP router a
# backend (presumably also used for certificate issuance via the
# lets-encrypt resolver — confirm with the Traefik setup).
task "whoami" {
driver = "docker"
config {
image = "traefik/whoami"
ports = ["http"]
}
service {
name = "whoami"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.port=${NOMAD_PORT_http}",
"traefik.http.routers.mail-http.rule=Host(`${NOMAD_META_domain}`)",
"traefik.http.routers.mail-http.entrypoints=web,websecure",
"traefik.http.routers.mail-http.tls.certresolver=lets-encrypt",
]
}
}
# Registration carrying only the Traefik TCP routing labels for the
# mail protocols; no single port is associated with it.
service {
name = "mail"
# port = "http"
tags = [
"traefik.enable=true",
# Explicit TLS (STARTTLS):
# SMTP
"traefik.tcp.routers.mail-smtp.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-smtp.entrypoints=smtp",
"traefik.tcp.routers.mail-smtp.service=mail-smtp",
"traefik.tcp.services.mail-smtp.loadbalancer.server.port=${NOMAD_HOST_PORT_smtp}",
"traefik.tcp.services.mail-smtp.loadbalancer.proxyProtocol.version=2",
# SMTP Submission
"traefik.tcp.routers.mail-submission.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-submission.entrypoints=submission",
"traefik.tcp.routers.mail-submission.service=mail-submission",
"traefik.tcp.services.mail-submission.loadbalancer.server.port=${NOMAD_HOST_PORT_submission}",
"traefik.tcp.services.mail-submission.loadbalancer.proxyProtocol.version=2",
# IMAP
"traefik.tcp.routers.mail-imap.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-imap.entrypoints=imap",
"traefik.tcp.routers.mail-imap.service=mail-imap",
"traefik.tcp.services.mail-imap.loadbalancer.server.port=${NOMAD_HOST_PORT_imap}",
"traefik.tcp.services.mail-imap.loadbalancer.proxyProtocol.version=2",
# POP3
"traefik.tcp.routers.mail-pop3.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-pop3.entrypoints=pop3",
"traefik.tcp.routers.mail-pop3.service=mail-pop3",
"traefik.tcp.services.mail-pop3.loadbalancer.server.port=${NOMAD_HOST_PORT_pop3}",
"traefik.tcp.services.mail-pop3.loadbalancer.proxyProtocol.version=2",
# ManageSieve
"traefik.tcp.routers.mail-managesieve.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-managesieve.entrypoints=managesieve",
"traefik.tcp.routers.mail-managesieve.service=mail-managesieve",
"traefik.tcp.services.mail-managesieve.loadbalancer.server.port=${NOMAD_HOST_PORT_managesieve}",
"traefik.tcp.services.mail-managesieve.loadbalancer.proxyProtocol.version=2",
# Implicit TLS is no different, except for optional HostSNI support:
# SMTP Submission Secure
# "traefik.tcp.routers.mail-submissions.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-submissions.entrypoints=submissions",
"traefik.tcp.routers.mail-submissions.service=mail-submissions",
"traefik.tcp.services.mail-submissions.loadbalancer.server.port=${NOMAD_HOST_PORT_submissions}",
"traefik.tcp.services.mail-submissions.loadbalancer.proxyProtocol.version=2",
# NOTE: Optionally match by SNI rule, this requires TLS passthrough (not compatible with STARTTLS):
"traefik.tcp.routers.mail-submissions.rule=HostSNI(`${NOMAD_META_domain}`)",
"traefik.tcp.routers.mail-submissions.tls.passthrough=true",
# IMAP Secure
# "traefik.tcp.routers.mail-imaps.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-imaps.entrypoints=imaps",
"traefik.tcp.routers.mail-imaps.service=mail-imaps",
"traefik.tcp.services.mail-imaps.loadbalancer.server.port=${NOMAD_HOST_PORT_imaps}",
"traefik.tcp.services.mail-imaps.loadbalancer.proxyProtocol.version=2",
# NOTE: Optionally match by SNI rule, this requires TLS passthrough (not compatible with STARTTLS):
"traefik.tcp.routers.mail-imaps.rule=HostSNI(`${NOMAD_META_domain}`)",
"traefik.tcp.routers.mail-imaps.tls.passthrough=true",
# POP3 Secure
# "traefik.tcp.routers.mail-pop3s.rule=HostSNI(`*`)",
"traefik.tcp.routers.mail-pop3s.entrypoints=pop3s",
"traefik.tcp.routers.mail-pop3s.service=mail-pop3s",
"traefik.tcp.services.mail-pop3s.loadbalancer.server.port=${NOMAD_HOST_PORT_pop3s}",
"traefik.tcp.services.mail-pop3s.loadbalancer.proxyProtocol.version=2",
# NOTE: Optionally match by SNI rule, this requires TLS passthrough (not compatible with STARTTLS):
"traefik.tcp.routers.mail-pop3s.rule=HostSNI(`${NOMAD_META_domain}`)",
"traefik.tcp.routers.mail-pop3s.tls.passthrough=true",
]
}
task "mail-server" {
driver = "docker"
config {
image = "ghcr.io/docker-mailserver/docker-mailserver:latest"
ports = ["smtp", "submissions", "submission", "imap", "imaps", "pop3", "pop3s", "managesieve"]
hostname = "${NOMAD_META_domain}"
# Persistent mail data/state/logs/config on host storage; home dirs
# and Traefik's acme.json are mounted read-only (the latter is the
# certificate source for the mailserver's TLS).
# NOTE(review): postfix-virtual.cf is rendered below but its bind
# mount is commented out — confirm how it reaches the container.
volumes = [
"/storage/nomad/mail/data/:/var/mail/",
"/storage/nomad/mail/state/:/var/mail-state/",
"/storage/nomad/mail/logs/:/var/log/mail/",
"/storage/nomad/mail/config/:/tmp/docker-mailserver/",
# "local/postfix-virtual.cf:/tmp/docker-mailserver/postfix-virtual.cf",
"local/postfix-master.cf:/tmp/docker-mailserver/postfix-master.cf",
"local/dovecot.cf:/tmp/docker-mailserver/dovecot.cf",
"/etc/localtime:/etc/localtime:ro",
"/oldstorage/home:/home/:ro",
"/storage/nomad/traefik/acme/acme.json:/etc/letsencrypt/acme.json:ro",
]
}
resources {
cpu = 2000
memory = 5000
}
# Environment from the sibling mailserver.env file (Consul keys are
# resolved by the template engine before injection).
template {
data = file("mailserver.env")
destination = "local/mailserver.env"
env = true
}
template {
data = file("postfix-virtual.cf")
destination = "local/postfix-virtual.cf"
}
# Postfix master.cf overrides: accept PROXY protocol from Traefik.
template {
data = <<EOF
# Enable proxy protocol support for postfix
smtp/inet/postscreen_upstream_proxy_protocol=haproxy
submission/inet/smtpd_upstream_proxy_protocol=haproxy
submissions/inet/smtpd_upstream_proxy_protocol=haproxy
EOF
destination = "local/postfix-master.cf"
}
# Dovecot overrides: accept PROXY (haproxy) protocol from the trusted
# Traefik address on every login listener.
template {
data = <<EOF
# Enable proxy protocol support for dovecot
haproxy_trusted_networks = 136.206.16.50
service imap-login {
inet_listener imap {
haproxy = yes
}
inet_listener imaps {
haproxy = yes
}
}
service pop3-login {
inet_listener pop3 {
haproxy = yes
}
inet_listener pop3s {
haproxy = yes
}
}
service managesieve-login {
inet_listener sieve {
haproxy = yes
}
}
EOF
destination = "local/dovecot.cf"
}
}
}
}

View file

@ -0,0 +1,91 @@
# Roundcube webmail plus a dedicated PostgreSQL task in the same group,
# so the app reaches the database via the allocation's NOMAD_*_db vars.
job "roundcube" {
  datacenters = ["aperture"]
  type        = "service"

  group "roundcube" {
    count = 1

    network {
      # Roundcube web UI, exposed through Traefik.
      port "http" {
        to = 80
      }
      # PostgreSQL for the app task.
      port "db" {
        to = 5432
      }
    }

    service {
      name = "roundcube-web"
      port = "http"

      check {
        type     = "http"
        path     = "/"
        interval = "10s"
        timeout  = "2s"
      }

      tags = [
        "traefik.enable=true",
        "traefik.http.routers.roundcube.rule=Host(`webmail.rb.dcu.ie`)",
        "traefik.http.routers.roundcube.entrypoints=web,websecure",
        "traefik.http.routers.roundcube.tls.certresolver=lets-encrypt",
      ]
    }

    task "roundcube" {
      driver = "docker"

      config {
        image    = "roundcube/roundcubemail:latest"
        ports    = ["http"]
        hostname = "webmail.rb.dcu.ie"
        volumes = [
          "/storage/nomad/roundcube/www:/var/www/html",
        ]
      }

      # All connection settings come from Consul; restart on change so the
      # container picks up the re-rendered environment.
      template {
        destination = "local/.env"
        env         = true
        change_mode = "restart"
        data        = <<EOH
ROUNDCUBEMAIL_DB_TYPE=pgsql
ROUNDCUBEMAIL_DB_HOST={{ env "NOMAD_IP_db" }}
ROUNDCUBEMAIL_DB_PORT={{ env "NOMAD_HOST_PORT_db" }}
ROUNDCUBEMAIL_DB_NAME={{ key "roundcube/db/name" }}
ROUNDCUBEMAIL_DB_USER={{ key "roundcube/db/user" }}
ROUNDCUBEMAIL_DB_PASSWORD={{ key "roundcube/db/password" }}
ROUNDCUBEMAIL_SKIN=elastic
ROUNDCUBEMAIL_DEFAULT_HOST={{ key "roundcube/imap/host" }}
ROUNDCUBEMAIL_DEFAULT_PORT={{ key "roundcube/imap/port" }}
ROUNDCUBEMAIL_SMTP_SERVER={{ key "roundcube/smtp/host" }}
ROUNDCUBEMAIL_SMTP_PORT={{ key "roundcube/smtp/port" }}
EOH
      }
    }

    task "roundcube-db" {
      driver = "docker"

      config {
        image = "postgres:17-alpine"
        ports = ["db"]
        volumes = [
          "/storage/nomad/roundcube/db:/var/lib/postgresql/data"
        ]
      }

      # BUGFIX: the official postgres image initialises the database named by
      # POSTGRES_DB; POSTGRES_NAME is not a recognised variable and was
      # silently ignored, so the database in ROUNDCUBEMAIL_DB_NAME would
      # never be created on a fresh volume (postgres falls back to a DB named
      # after POSTGRES_USER).
      template {
        data = <<EOH
POSTGRES_PASSWORD={{ key "roundcube/db/password" }}
POSTGRES_USER={{ key "roundcube/db/user" }}
POSTGRES_DB={{ key "roundcube/db/name" }}
EOH
        destination = "local/db.env"
        env         = true
      }
    }
  }
}

View file

@ -1,118 +0,0 @@
# Paperless-ngx document management: web app, redis broker and postgres
# database as three tasks sharing one allocation network.
job "paperless" {
datacenters = ["aperture"]
type = "service"
group "paperless-web" {
network {
port "http" {
to = 8000
}
port "redis" {
to = 6379
}
port "db" {
to = 5432
}
}
service {
name = "paperless"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.paperless.rule=Host(`paperless.redbrick.dcu.ie`) || Host(`paperless.rb.dcu.ie`)",
"traefik.http.routers.paperless.entrypoints=websecure",
"traefik.http.routers.paperless.tls=true",
"traefik.http.routers.paperless.tls.certresolver=lets-encrypt",
"traefik.http.middlewares.paperless.headers.contentSecurityPolicy=default-src 'self'; img-src 'self' data:"
]
}
# Main application container; consume/data/media/export dirs persist on
# host storage.
task "web" {
driver = "docker"
config {
image = "ghcr.io/paperless-ngx/paperless-ngx:latest"
ports = ["http"]
volumes = [
"/storage/nomad/paperless/consume:/usr/src/paperless/consume",
"/storage/nomad/paperless/data:/usr/src/paperless/data",
"/storage/nomad/paperless/media:/usr/src/paperless/media",
"/storage/nomad/paperless/export:/usr/src/paperless/export",
"/storage/nomad/paperless/preconsume:/usr/src/paperless/preconsume",
]
}
# Redis/DB endpoints come from the allocation's port env vars; secrets
# come from Consul.
template {
data = <<EOH
PAPERLESS_REDIS = "redis://{{ env "NOMAD_ADDR_redis" }}"
PAPERLESS_DBHOST = "{{ env "NOMAD_IP_db" }}"
PAPERLESS_DBPORT = "{{ env "NOMAD_HOST_PORT_db" }}"
PAPERLESS_DBPASS={{ key "paperless/db/password" }}
PAPERLESS_DBUSER={{ key "paperless/db/user" }}
PAPERLESS_DBNAME={{ key "paperless/db/name" }}
PAPERLESS_SECRETKEY={{ key "paperless/secret_key" }}
PAPERLESS_URL=https://paperless.redbrick.dcu.ie
PAPERLESS_ADMIN_USER={{ key "paperless/admin/user" }}
PAPERLESS_ADMIN_PASSWORD={{ key "paperless/admin/password" }}
PAPERLESS_ALLOWED_HOSTS="paperless.redbrick.dcu.ie,paperless.rb.dcu.ie,10.10.0.4,10.10.0.5,10.10.0.6" # allow internal aperture IPs for health check
PAPERLESS_CONSUMER_POLLING=1
EOH
destination = "local/.env"
env = true
}
# PAPERLESS_PRE_CONSUME_SCRIPT={{ key "paperless/env/preconsume-script" }}
resources {
cpu = 800
memory = 1000
}
}
# Redis broker used by paperless for task queueing.
task "broker" {
driver = "docker"
config {
image = "docker.io/library/redis:7"
ports = ["redis"]
}
resources {
cpu = 300
memory = 50
}
}
task "db" {
driver = "docker"
config {
image = "postgres:16-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/paperless/db:/var/lib/postgresql/data"
]
}
# NOTE(review): the official postgres image reads POSTGRES_DB, not
# POSTGRES_NAME — confirm the intended database name is actually
# created on a fresh volume.
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "paperless/db/password" }}
POSTGRES_USER={{ key "paperless/db/user" }}
POSTGRES_NAME={{ key "paperless/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -7,15 +7,12 @@ job "plausible" {
port "http" {
to = 8000
}
port "clickhouse" {
static = 8123
}
port "db" {
static = 5432
static = 8123
}
}
task "app" {
task "plausible" {
service {
name = "plausible"
port = "http"
@ -38,11 +35,8 @@ job "plausible" {
driver = "docker"
config {
image = "ghcr.io/plausible/community-edition:v2.1"
image = "ghcr.io/plausible/community-edition:v2.1.1"
ports = ["http"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/plausible"
]
command = "/bin/sh"
args = ["-c", "sleep 10 && /entrypoint.sh db migrate && /entrypoint.sh run"]
@ -50,8 +44,6 @@ job "plausible" {
template {
data = <<EOH
TMPDIR=/var/lib/plausible/tmp
BASE_URL=https://plausible.redbrick.dcu.ie
SECRET_KEY_BASE={{ key "plausible/secret" }}
TOTP_VAULT_KEY={{ key "plausible/totp/key" }}
@ -65,8 +57,8 @@ GOOGLE_CLIENT_ID={{ key "plausible/google/client_id" }}
GOOGLE_CLIENT_SECRET={{ key "plausible/google/client_secret" }}
# Database settings
DATABASE_URL=postgres://{{ key "plausible/db/user" }}:{{ key "plausible/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "plausible/db/name" }}
CLICKHOUSE_DATABASE_URL=http://{{ env "NOMAD_ADDR_clickhouse" }}/plausible_events_db
DATABASE_URL=postgres://{{ key "plausible/db/user" }}:{{ key "plausible/db/password" }}@postgres.service.consul:5432/{{ key "plausible/db/name" }}
CLICKHOUSE_DATABASE_URL=http://{{ env "NOMAD_ADDR_db" }}/plausible_events_db
# Email settings
MAILER_NAME="Redbrick Plausible"
@ -88,43 +80,24 @@ EOH
}
}
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "plausible/db/password" }}
POSTGRES_USER={{ key "plausible/db/user" }}
POSTGRES_NAME={{ key "plausible/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
task "clickhouse" {
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
service {
name = "plausible-clickhouse"
port = "clickhouse"
port = "db"
}
driver = "docker"
config {
image = "clickhouse/clickhouse-server:24.3.3.102-alpine"
ports = ["clickhouse"]
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/clickhouse",
"/opt/plausible/clickhouse:/var/lib/clickhouse",
"local/clickhouse.xml:/etc/clickhouse-server/config.d/logging.xml:ro",
"local/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro"
]
@ -167,7 +140,7 @@ EOH
}
resources {
memory = 1000
memory = 800
}
}
}

View file

@ -1,4 +1,4 @@
job "paperless-backup" {
job "postgres-backup" {
datacenters = ["aperture"]
type = "batch"
@ -20,17 +20,17 @@ job "paperless-backup" {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/paperless/postgresql-paperless-$(date +%Y-%m-%d_%H-%M-%S).sql
file=/storage/backups/nomad/postgres/postgres-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/paperless
mkdir -p /storage/backups/nomad/postgres
alloc_id=$(nomad job status paperless | grep running | tail -n 1 | cut -d " " -f 1)
alloc_id=$(nomad job status postgres | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task db $alloc_id pg_dumpall -U {{ key "paperless/db/user" }} > "${file}"
nomad alloc exec $alloc_id pg_dumpall -U {{ key "postgres/username/root" }} > "${file}"
find /storage/backups/nomad/paperless/postgresql-paperless* -ctime +3 -exec rm {} \; || true
find /storage/backups/nomad/postgres/hedgedoc/postgres* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"

View file

@ -0,0 +1,96 @@
# Shared PostgreSQL instance, pinned to host "wheatley" because the data
# directory lives on that node's local disk (/opt/postgres).
job "postgres" {
  datacenters = ["aperture"]

  constraint {
    attribute = "${attr.unique.hostname}"
    value     = "wheatley"
  }

  group "db" {
    network {
      # Static port so other jobs can reach postgres.service.consul:5432.
      port "db" {
        static = 5432
      }
    }

    task "postgres-db" {
      driver = "docker"

      # Superuser credentials from Consul.
      template {
        data = <<EOH
POSTGRES_PASSWORD="{{ key "postgres/password/root" }}"
POSTGRES_USER="{{ key "postgres/username/root" }}"
EOH
        destination = "local/file.env"
        env         = true
      }

      config {
        image = "postgres:latest"
        ports = ["db"]
        # BUGFIX: postgres reads the postgresql.conf inside its data
        # directory unless told otherwise, so the file mounted at
        # /etc/postgres/postgresql.conf (and the custom hba_file it points
        # at) was silently ignored. Pass the flag explicitly; the image's
        # entrypoint prepends "postgres" when the first arg starts with "-".
        # NOTE(review): "latest" risks an unplanned major upgrade against the
        # on-disk data directory — consider pinning a major version.
        args = ["-c", "config_file=/etc/postgres/postgresql.conf"]
        volumes = [
          "/opt/postgres:/var/lib/postgresql/data",
          "local/postgresql.conf:/etc/postgres/postgresql.conf",
          "local/pg_hba.conf:/pg_hba.conf",
        ]
      }

      # Server tuning; hba_file points at the template rendered below.
      template {
        data = <<EOH
max_connections = 100
shared_buffers = 2GB
effective_cache_size = 6GB
maintenance_work_mem = 512MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 5242kB
min_wal_size = 1GB
max_wal_size = 4GB
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2
hba_file = "/pg_hba.conf"
EOH
        destination = "local/postgresql.conf"
      }

      # Local/loopback connections are trusted; all remote connections must
      # authenticate with scram-sha-256.
      template {
        data = <<EOH
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all all scram-sha-256
EOH
        destination = "local/pg_hba.conf"
      }

      resources {
        cpu    = 400
        memory = 800
      }

      service {
        name = "postgres"
        port = "db"

        check {
          type     = "tcp"
          interval = "2s"
          timeout  = "2s"
        }
      }
    }
  }
}

View file

@ -1,50 +0,0 @@
# Periodic pg_dumpall of the privatebin database. Runs on the host via
# raw_exec so it can use the local nomad CLI to exec into the db task.
job "privatebin-backup" {
datacenters = ["aperture"]
type = "batch"
# Every 3 hours; skip a run rather than overlap a slow dump.
periodic {
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "postgres-backup" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
# Dumps via `nomad alloc exec`, prunes dumps older than 3 days, and
# posts a Discord webhook alert when the dump comes out empty.
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/privatebin/postgresql-privatebin-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/privatebin
alloc_id=$(nomad job status privatebin | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task db $alloc_id pg_dumpall -U {{ key "privatebin/db/user" }} > "${file}"
find /storage/backups/nomad/privatebin/postgresql-privatebin* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "postgres/webhook/discord" }}
fi
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -10,9 +10,6 @@ job "privatebin" {
port "http" {
to = 8080
}
port "db" {
to = 5432
}
}
service {
@ -28,7 +25,7 @@ job "privatebin" {
tags = [
"traefik.enable=true",
"traefik.http.routers.privatebin.rule=Host(`paste.redbrick.dcu.ie`) || Host(`paste.rb.dcu.ie`)",
"traefik.http.routers.privatebin.rule=Host(`paste.rb.dcu.ie`) || Host(`paste.redbrick.dcu.ie`)",
"traefik.http.routers.privatebin.entrypoints=web,websecure",
"traefik.http.routers.privatebin.tls.certresolver=lets-encrypt",
]
@ -45,10 +42,15 @@ job "privatebin" {
"local/conf.php:/srv/data/conf.php",
]
}
env {
TZ = "Europe/Dublin"
PHP_TZ = "Europe/Dublin"
CONFIG_PATH = "/srv/data/"
template {
destination = "local/.env"
env = true
change_mode = "restart"
data = <<EOH
TZ=Europe/Dublin
PHP_TZ=Europe/Dublin
CONFIG_PATH=/srv/data/
EOH
}
template {
@ -57,7 +59,7 @@ job "privatebin" {
[main]
name = "Redbrick PasteBin"
basepath = "https://paste.redbrick.dcu.ie/"
basepath = "https://paste.rb.dcu.ie/"
discussion = true
@ -183,36 +185,13 @@ batchsize = 10
[model]
class = Database
[model_options]
dsn = "pgsql:host={{ env "NOMAD_ADDR_db" }};dbname={{ key "privatebin/db/name" }}"
tbl = "{{ key "privatebin/db/name" }}" ; table prefix
dsn = "pgsql:host=postgres.service.consul;dbname={{ key "privatebin/db/name" }}"
tbl = "privatebin_" ; table prefix
usr = "{{ key "privatebin/db/user" }}"
pwd = "{{ key "privatebin/db/password" }}"
opt[12] = true ; PDO::ATTR_PERSISTENT ; use persistent connections - default
EOH
}
}
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "privatebin/db/password" }}
POSTGRES_USER={{ key "privatebin/db/user" }}
POSTGRES_NAME={{ key "privatebin/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -1,50 +0,0 @@
# Periodic pg_dumpall of the vaultwarden database. Same pattern as the
# other *-backup jobs: raw_exec on the host, exec into the db task.
job "vaultwarden-backup" {
datacenters = ["aperture"]
type = "batch"
# Every 3 hours; skip a run rather than overlap a slow dump.
periodic {
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "postgres-backup" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
# Dumps via `nomad alloc exec`, prunes dumps older than 3 days, and
# posts a Discord webhook alert when the dump comes out empty.
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/vaultwarden/postgresql-vaultwarden-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/vaultwarden
alloc_id=$(nomad job status vaultwarden | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task db $alloc_id pg_dumpall -U {{ key "vaultwarden/db/user" }} > "${file}"
find /storage/backups/nomad/vaultwarden/postgresql-vaultwarden* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "postgres/webhook/discord" }}
fi
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -9,9 +9,6 @@ job "vaultwarden" {
port "http" {
to = 80
}
port "db" {
to = 5432
}
}
service {
@ -34,15 +31,14 @@ job "vaultwarden" {
ports = ["http"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}:/data",
"/etc/localtime:/etc/localtime:ro"
"/storage/nomad/vaultwarden:/data"
]
}
template {
data = <<EOF
DOMAIN=https://vault.redbrick.dcu.ie
DATABASE_URL=postgresql://{{ key "vaultwarden/db/user" }}:{{ key "vaultwarden/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "vaultwarden/db/name" }}
DATABASE_URL=postgresql://{{ key "vaultwarden/db/user" }}:{{ key "vaultwarden/db/password" }}@postgres.service.consul:5432/{{ key "vaultwarden/db/name" }}
SIGNUPS_ALLOWED=false
INVITATIONS_ALLOWED=true
@ -59,37 +55,14 @@ EOF
destination = "local/env"
env = true
}
# These yubico variables are not necessary for yubikey support, only to verify the keys with yubico.
#YUBICO_CLIENT_ID={{ key "vaultwarden/yubico/client_id" }}
#YUBICO_SECRET_KEY={{ key "vaultwarden/yubico/secret_key" }}
# These yubico variables are not necessary for yubikey support, only to verify the keys with yubico.
#YUBICO_CLIENT_ID={{ key "vaultwarden/yubico/client_id" }}
#YUBICO_SECRET_KEY={{ key "vaultwarden/yubico/secret_key" }}
resources {
cpu = 500
memory = 500
}
}
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "vaultwarden/db/password" }}
POSTGRES_USER={{ key "vaultwarden/db/user" }}
POSTGRES_NAME={{ key "vaultwarden/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -41,7 +41,7 @@ $wgDBpassword = "{{ key "mediawiki/db/password" }}";
# MySQL specific settings
$wgDBprefix = "rbwiki_";
# MySQL table options to use during installation or update
$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=utf8mb4";
$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
## Shared memory settings
$wgMainCacheType = CACHE_NONE;
@ -89,15 +89,11 @@ wfLoadSkin( 'Vector' );
wfLoadSkin( 'Citizen' );
wfLoadSkin( 'Timeless' );
wfLoadSkin( 'MinervaNeue' );
wfLoadSkin( 'Medik' );
$wgCitizenThemeColor = "#a81e22";
$wgCitizenShowPageTools = "permission";
$wgCitizenSearchDescriptionSource = "pagedescription";
$wgMedikColor = "#a81e22";
$wgMedikShowLogo = "main";
$wgLocalisationUpdateDirectory = "$IP/cache";
# load extensions

View file

@ -17,7 +17,7 @@ job "mediawiki-backup" {
}
template {
data = <<EOH
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/wiki/mysql/rbwiki-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql
@ -30,7 +30,7 @@ job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task rbwiki-db $alloc_id mariadb-dump -u {{ key "mediawiki/db/username" }} -p'{{ key "mediawiki/db/password"}}' {{ key "mediawiki/db/name" }} > "${file}"
find /storage/backups/nomad/wiki/mysql/rbwiki-mysql* -ctime +30 -exec rm {} \; || true
find /storage/backups/nomad/wiki/mysql/rbwiki-mysql* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
@ -56,7 +56,7 @@ EOH
}
template {
data = <<EOH
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/wiki/xml/rbwiki-dump-$(date +%Y-%m-%d_%H-%M-%S).xml

View file

@ -1,6 +1,6 @@
job "mediawiki" {
datacenters = ["aperture"]
type = "service"
type = "service"
meta {
domain = "wiki.redbrick.dcu.ie"
@ -27,10 +27,10 @@ job "mediawiki" {
port = "http"
check {
type = "http"
path = "/Main_Page"
type = "http"
path = "/Main_Page"
interval = "10s"
timeout = "5s"
timeout = "5s"
}
tags = [
@ -61,11 +61,11 @@ job "mediawiki" {
]
}
resources {
cpu = 200
memory = 100
}
cpu = 200
memory = 100
}
template {
data = <<EOH
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
@ -139,9 +139,9 @@ EOH
}
resources {
cpu = 4000
memory = 1200
}
cpu = 4000
memory = 1200
}
template {
data = <<EOH
@ -179,7 +179,7 @@ EOH
}
template {
data = file("LocalSettings.php")
data = file("LocalSettings.php")
destination = "local/LocalSettings.php"
}
}
@ -189,10 +189,10 @@ EOH
port = "db"
check {
name = "mariadb_probe"
type = "tcp"
name = "mariadb_probe"
type = "tcp"
interval = "10s"
timeout = "2s"
timeout = "2s"
}
}
@ -213,18 +213,6 @@ EOH
template {
data = <<EOH
[mysqld]
# Ensure full UTF-8 support
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
skip-character-set-client-handshake
# Fix 1000-byte key length issue
innodb_large_prefix = 1
innodb_file_format = Barracuda
innodb_file_per_table = 1
innodb_default_row_format = dynamic
# Performance optimizations (Keep these based on your system)
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
@ -236,23 +224,22 @@ innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
# Network
bind-address = 0.0.0.0
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 2500
}
cpu = 800
memory = 1200
}
template {
data = <<EOH
@ -263,7 +250,7 @@ MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
env = true
}
}
}

View file

@ -12,7 +12,7 @@ job "ams-amikon" {
network {
port "http" {
to = 3000
to = 80
}
}
@ -20,45 +20,35 @@ job "ams-amikon" {
port = "http"
check {
type = "http"
path = "/"
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.ams-amikon.rule=Host(`amikon.me`) || Host(`www.amikon.me`)",
"traefik.http.routers.ams-amikon.entrypoints=web,websecure",
"traefik.http.routers.ams-amikon.tls.certresolver=lets-encrypt",
"traefik.http.routers.ams-amikon.middlewares=amikon-www-redirect",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.regex=^https?://www.amikon.me/(.*)",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.replacement=https://amikon.me/$${1}",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.permanent=true",
"traefik.http.routers.ams-amikon.middlewares=www-redirect",
"traefik.http.middlewares.www-redirect.redirectregex.regex=^https?://www.amikon.me/(.*)",
"traefik.http.middlewares.www-redirect.redirectregex.replacement=https://amikon.me/$${1}",
"traefik.http.middlewares.www-redirect.redirectregex.permanent=true",
]
}
task "amikon-node" {
task "amikon-nginx" {
driver = "docker"
config {
image = "ghcr.io/dcuams/amikon-site-v2:latest"
image = "ghcr.io/dcuams/amikon-site-v2:latest"
force_pull = true
ports = ["http"]
}
template {
data = <<EOF
EMAIL={{ key "ams/amikon/email/user" }}
EMAIL_PASS={{ key "ams/amikon/email/password" }}
TO_EMAIL={{ key "ams/amikon/email/to" }}
EOF
destination = ".env"
env = true
ports = ["http"]
}
resources {
cpu = 800
memory = 500
cpu = 100
memory = 50
}
}
}

View file

@ -1,107 +0,0 @@
# Listmonk mailing-list manager for DCU Solar Racing, served at
# lists.solarracing.ie behind Traefik. Two tasks in one group: the
# listmonk app and its dedicated Postgres instance.
job "dcusr-listmonk" {
datacenters = ["aperture"]
type = "service"
meta {
# Consumed by the Traefik router rule below via NOMAD_META_domain.
domain = "lists.solarracing.ie"
}
group "listmonk" {
network {
# Dynamic host port for the app; Traefik discovers it via the service tag.
port "http" {
}
# Postgres, mapped to the container's standard 5432.
port "db" {
to = 5432
}
}
service {
name = "listmonk"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.port=${NOMAD_PORT_http}",
"traefik.http.routers.dcusr-listmonk.entrypoints=web,websecure",
"traefik.http.routers.dcusr-listmonk.rule=Host(`${NOMAD_META_domain}`)",
"traefik.http.routers.dcusr-listmonk.tls=true",
"traefik.http.routers.dcusr-listmonk.tls.certresolver=lets-encrypt",
]
}
task "app" {
driver = "docker"
config {
image = "listmonk/listmonk:latest"
ports = ["http"]
# --static-dir points listmonk at the host-mounted static assets below.
entrypoint = ["./listmonk", "--static-dir=/listmonk/static"]
# NOTE(review): the postgres data dir is mounted here as well as in the
# listmonk-db task — presumably unnecessary for the app container; confirm.
volumes = [
"/storage/nomad/dcusr-listmonk/static:/listmonk/static",
"/storage/nomad/dcusr-listmonk/postgres/:/var/lib/postgresql/data",
"local/config.toml:/listmonk/config.toml"
]
}
resources {
cpu = 1000
memory = 500
}
# listmonk config rendered from Consul KV; DB credentials must match the
# listmonk-db task's environment below.
template {
data = <<EOH
[app]
address = "0.0.0.0:{{ env "NOMAD_PORT_http" }}"
admin_username = "{{ key "dcusr/listmonk/admin/username" }}"
admin_password = "{{ key "dcusr/listmonk/admin/password" }}"

# Database.
[db]
host = "{{ env "NOMAD_HOST_IP_db" }}"
port = {{ env "NOMAD_HOST_PORT_db" }}
user = "{{ key "dcusr/listmonk/db/username" }}"
password = "{{ key "dcusr/listmonk/db/password" }}"
database = "{{ key "dcusr/listmonk/db/name" }}"
ssl_mode = "disable"
max_open = 25
max_idle = 25
max_lifetime = "300s"
EOH
destination = "local/config.toml"
}
}
task "listmonk-db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
# Data persisted on host storage so the DB survives rescheduling.
volumes = [
"/storage/nomad/dcusr-listmonk/postgres:/var/lib/postgresql/data"
]
}
template {
data = <<EOH
POSTGRES_DB = "{{ key "dcusr/listmonk/db/name" }}"
POSTGRES_USER = "{{ key "dcusr/listmonk/db/username" }}"
POSTGRES_PASSWORD = "{{ key "dcusr/listmonk/db/password" }}"
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -126,7 +126,7 @@ EOH
driver = "docker"
config {
image = "postgres:16-alpine"
image = "postgres:alpine"
ports = ["db"]
volumes = [

View file

@ -53,11 +53,6 @@ DOCKER_PASS={{ key "dcusr/ghcr/password" }}
TO_EMAIL={{ key "dcusr/nodemailer/to" }}
EMAIL={{ key "dcusr/nodemailer/from" }}
EMAIL_PASS={{ key "dcusr/nodemailer/password" }}
LISTMONK_ENDPOINT={{ key "dcusr/listmonk/endpoint" }}
LISTMONK_USERNAME={{ key "dcusr/listmonk/username" }}
LISTMONK_PASSWORD={{ key "dcusr/listmonk/password" }}
LISTMONK_LIST_IDS={{ key "dcusr/listmonk/list/id" }}
RECAPTCHA_SECRET_KEY={{ key "dcusr/recaptcha/secret/key" }}
EOH
}
}

View file

@ -1,47 +0,0 @@
# DCU Esports multipurpose Discord bot. Single Docker task; all secrets are
# rendered from Consul KV into an env file. The RCON port is resolved
# dynamically from the Consul catalog entry for the Minecraft server.
job "esports-discord-bot" {
datacenters = ["aperture"]
type = "service"
group "esports-bot" {
count = 1
task "esports-bot" {
driver = "docker"
config {
image = "ghcr.io/aydenjahola/discord-multipurpose-bot:main"
# Always re-pull so a pushed :main image is picked up on restart.
force_pull = true
}
resources {
cpu = 500
memory = 256
}
template {
data = <<EOH
BOT_TOKEN={{ key "socs/esports/bot/discord/token" }}

EMAIL_NAME={{ key "socs/esports/bot/email/name" }}
EMAIL_PASS={{ key "socs/esports/bot/email/pass" }}
EMAIL_USER={{key "socs/esports/bot/email/user" }}

MONGODB_URI={{key "socs/esports/bot/mongodb/uri"}}

RAPIDAPI_KEY={{ key "socs/esports/bot/rapidapi/key" }}

TRACKER_API_KEY={{ key "socs/esports/bot/trackerapi/key" }}
TRACKER_API_URL={{ key "socs/esports/bot/trackerapi/url" }}

WORDNIK_API_KEY={{key "socs/esports/bot/wordnikapi/key" }}

HUGGING_FACE_API_KEY={{ key "socs/esports/bot/huggingface/key" }}

RCON_HOST=esports-mc-rcon.service.consul
# https://discuss.hashicorp.com/t/passing-registered-ip-and-port-from-consul-to-env-nomad-job-section/35647
{{ range service "esports-mc-rcon" }}
RCON_PORT={{ .Port }}{{ end }}
RCON_PASSWORD={{ key "games/mc/esports-mc/rcon/password" }}
EOH
destination = "local/.env"
env = true
}
}
}
}

View file

@ -46,6 +46,9 @@ job "mps-site" {
username = "${DOCKER_USER}"
password = "${DOCKER_PASS}"
}
volumes = [
"local/hosts:/etc/hosts",
]
}
template {
@ -57,6 +60,23 @@ EOH
env = true
}
template {
data = <<EOF
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.17 {{ env "NOMAD_TASK_NAME" }}
# use internal IP for thecollegeview.ie as external IP isn't routable
192.168.0.158 thecollegeview.ie
192.168.0.158 www.thecollegeview.ie
EOF
destination = "local/hosts"
}
resources {
cpu = 300
memory = 500

View file

@ -1,49 +0,0 @@
# Periodic MySQL dump of The College View's database. Runs on the host via
# raw_exec and uses `nomad alloc exec` against the running tcv-db task, so no
# DB port needs to be exposed to the backup job. On failure it posts a
# Discord webhook alert.
job "mps-thecollegeview-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# Nomad cron here has 6 fields (seconds first), so "0 */3 * * * *" fires
# at second 0 of every 3rd MINUTE.
# NOTE(review): if the intent was every 3 hours, this should be
# "0 0 */3 * * *" — confirm.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "mysql-backup" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/mysql-backup.sh"]
}
# Backup script; dumps are timestamped and pruned after ~3 days (ctime +3).
template {
data = <<EOH
#!/bin/bash

file=/storage/backups/nomad/mps-thecollegeview/mysql/tcv-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql

mkdir -p /storage/backups/nomad/mps-thecollegeview/mysql

alloc_id=$(nomad job status mps-thecollegeview | grep running | tail -n 1 | cut -d " " -f 1)

job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)

nomad alloc exec -task tcv-db $alloc_id mariadb-dump -u {{ key "mps/thecollegeview/db/username" }} -p'{{ key "mps/thecollegeview/db/password"}}' {{ key "mps/thecollegeview/db/name" }} > "${file}"

find /storage/backups/nomad/mps-thecollegeview/mysql/tcv-mysql* -ctime +3 -exec rm {} \; || true

if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "# <@&585512338728419341> `MySQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "mysql/webhook/discord" }}
fi
EOH
destination = "local/mysql-backup.sh"
}
}
}
}

View file

@ -1,257 +0,0 @@
# WordPress deployment for The College View (thecollegeview.ie).
# One bridge-networked group with four tasks: nginx (front), php-fpm
# (WordPress), MariaDB, and Redis (object cache). Tasks address each other
# through the NOMAD_ADDR_* env vars interpolated in the templates below.
job "mps-thecollegeview" {
datacenters = ["aperture"]
type = "service"
meta {
domain = "thecollegeview.ie"
}
group "tcv" {
count = 1
network {
mode = "bridge"
port "http" {
to = 80
}
port "fpm" {
to = 9000
}
port "db" {
to = 3306
}
port "redis" {
to = 6379
}
}
service {
name = "tcv-web"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "5s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.tcv.rule=Host(`${NOMAD_META_domain}`)",
"traefik.http.routers.tcv.entrypoints=web,websecure",
"traefik.http.routers.tcv.tls.certresolver=lets-encrypt",
]
}
# Front-end nginx: serves static files / WP Super Cache output directly and
# forwards PHP to the fpm task.
task "tcv-nginx" {
driver = "docker"
config {
image = "nginx:alpine"
ports = ["http"]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"/storage/nomad/mps-thecollegeview:/var/www/html/",
]
group_add = [82] # www-data in alpine
}
resources {
cpu = 200
memory = 100
}
template {
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
server_tokens off;
error_log /dev/stderr error;
access_log /dev/stdout;
charset utf-8;
server {
server_name {{ env "NOMAD_META_domain" }};
listen 80;
listen [::]:80;
root /var/www/html;
index index.php index.html index.htm;
client_max_body_size 5m;
client_body_timeout 60;
# NOTE: Not used here, WP super cache rule used instead
# # Pass all folders to FPM
# location / {
# try_files $uri $uri/ /index.php?$args;
# }
# Pass the PHP scripts to FastCGI server
location ~ \.php$ {
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass {{ env "NOMAD_ADDR_fpm" }};
fastcgi_index index.php;
}
location ~ /\.ht {
deny all;
}
# WP Super Cache rules.
set $cache_uri $request_uri;
# POST requests and urls with a query string should always go to PHP
if ($request_method = POST) {
set $cache_uri 'null cache';
}
if ($query_string != "") {
set $cache_uri 'null cache';
}
# Don't cache uris containing the following segments
if ($request_uri ~* "(/wp-admin/|/xmlrpc.php|/wp-(app|cron|login|register|mail).php|wp-.*.php|/feed/|index.php|wp-comments-popup.php|wp-links-opml.php|wp-locations.php|sitemap(_index)?.xml|[a-z0-9_-]+-sitemap([0-9]+)?.xml)") {
set $cache_uri 'null cache';
}
# Don't use the cache for logged in users or recent commenters
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_logged_in") {
set $cache_uri 'null cache';
}
# Use cached or actual file if they exists, otherwise pass request to WordPress
location / {
try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?$args ;
}
}
}
EOH
destination = "local/nginx.conf"
}
}
# WordPress php-fpm backend; shares the same host-mounted docroot as nginx.
task "tcv-phpfpm" {
driver = "docker"
config {
image = "wordpress:php8.3-fpm-alpine"
ports = ["fpm"]
volumes = [
"/storage/nomad/mps-thecollegeview:/var/www/html/",
"local/custom.ini:/usr/local/etc/php/conf.d/custom.ini",
]
}
resources {
cpu = 800
memory = 500
}
template {
data = <<EOH
WORDPRESS_DB_HOST={{ env "NOMAD_ADDR_db" }}
WORDPRESS_DB_USER={{ key "mps/thecollegeview/db/username" }}
WORDPRESS_DB_PASSWORD={{ key "mps/thecollegeview/db/password" }}
WORDPRESS_DB_NAME={{ key "mps/thecollegeview/db/name" }}
WORDPRESS_TABLE_PREFIX=wp_2
WORDPRESS_CONFIG_EXTRA="define('WP_REDIS_HOST', '{{ env "NOMAD_ADDR_redis" }}');"
EOH
destination = "local/.env"
env = true
}
template {
data = <<EOH
pm.max_children = 10
upload_max_filesize = 64M
post_max_size = 64M
EOH
destination = "local/custom.ini"
}
}
# Registered so other jobs (e.g. the backup job's `nomad alloc exec`) and
# operators can locate the DB task.
service {
name = "tcv-db"
port = "db"
}
task "tcv-db" {
driver = "docker"
config {
image = "mariadb"
ports = ["db"]
volumes = [
"/storage/nomad/mps-thecollegeview/db:/var/lib/mysql",
]
}
template {
data = <<EOH
[mysqld]
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
innodb_buffer_pool_size = 6G
innodb_log_file_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 800
}
template {
data = <<EOH
MYSQL_DATABASE={{ key "mps/thecollegeview/db/name" }}
MYSQL_USER={{ key "mps/thecollegeview/db/username" }}
MYSQL_PASSWORD={{ key "mps/thecollegeview/db/password" }}
MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
}
}
# Redis object cache, wired into WordPress via WP_REDIS_HOST above.
task "redis" {
driver = "docker"
config {
image = "redis:latest"
ports = ["redis"]
}
resources {
cpu = 200
}
}
}
}

View file

@ -1,49 +0,0 @@
# Periodic MySQL dump of The Look's database. Mirrors the
# mps-thecollegeview-backup job: raw_exec on the host, dump taken through
# `nomad alloc exec` into the running thelook-db task, Discord webhook on
# failure.
job "style-thelook-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# 6-field Nomad cron (seconds first): fires at second 0 of every 3rd
# MINUTE. NOTE(review): if every 3 hours was intended, use
# "0 0 */3 * * *" — confirm.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "mysql-backup" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/mysql-backup.sh"]
}
# Dumps are timestamped and pruned after ~3 days (ctime +3).
template {
data = <<EOH
#!/bin/bash

file=/storage/backups/nomad/style-thelook/mysql/thelook-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql

mkdir -p /storage/backups/nomad/style-thelook/mysql

alloc_id=$(nomad job status style-thelook | grep running | tail -n 1 | cut -d " " -f 1)

job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)

nomad alloc exec -task thelook-db $alloc_id mariadb-dump -u {{ key "style/thelook/db/username" }} -p'{{ key "style/thelook/db/password"}}' {{ key "style/thelook/db/name" }} > "${file}"

find /storage/backups/nomad/style-thelook/mysql/thelook-mysql* -ctime +3 -exec rm {} \; || true

if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "# <@&585512338728419341> `MySQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "mysql/webhook/discord" }}
fi
EOH
destination = "local/mysql-backup.sh"
}
}
}
}

View file

@ -1,257 +0,0 @@
# WordPress deployment for The Look (thelookonline.dcu.ie, also reachable as
# style.redbrick.dcu.ie). Structurally identical to the mps-thecollegeview
# job: bridge-networked group with nginx, php-fpm, MariaDB and Redis tasks
# that reach each other via the NOMAD_ADDR_* env vars in the templates.
job "style-thelook" {
datacenters = ["aperture"]
type = "service"
meta {
domain = "thelookonline.dcu.ie"
}
group "thelook" {
count = 1
network {
mode = "bridge"
port "http" {
to = 80
}
port "fpm" {
to = 9000
}
port "db" {
to = 3306
}
port "redis" {
to = 6379
}
}
service {
name = "thelook-web"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "5s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.thelook.rule=Host(`${NOMAD_META_domain}`) || Host(`style.redbrick.dcu.ie`)",
"traefik.http.routers.thelook.entrypoints=web,websecure",
"traefik.http.routers.thelook.tls.certresolver=lets-encrypt",
]
}
# Front-end nginx: serves WP Super Cache output / static files directly,
# hands PHP off to the fpm task.
task "thelook-nginx" {
driver = "docker"
config {
image = "nginx:alpine"
ports = ["http"]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"/storage/nomad/style-thelook:/var/www/html/",
]
group_add = [82] # www-data in alpine
}
resources {
cpu = 200
memory = 100
}
template {
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
server_tokens off;
error_log /dev/stderr error;
access_log /dev/stdout;
charset utf-8;
server {
server_name {{ env "NOMAD_META_domain" }};
listen 80;
listen [::]:80;
root /var/www/html;
index index.php index.html index.htm;
client_max_body_size 5m;
client_body_timeout 60;
# NOTE: Not used here, WP super cache rule used instead
# Pass all folders to FPM
# location / {
# try_files $uri $uri/ /index.php?$args;
# }
# Pass the PHP scripts to FastCGI server
location ~ \.php$ {
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass {{ env "NOMAD_ADDR_fpm" }};
fastcgi_index index.php;
}
location ~ /\.ht {
deny all;
}
# WP Super Cache rules.
set $cache_uri $request_uri;
# POST requests and urls with a query string should always go to PHP
if ($request_method = POST) {
set $cache_uri 'null cache';
}
if ($query_string != "") {
set $cache_uri 'null cache';
}
# Don't cache uris containing the following segments
if ($request_uri ~* "(/wp-admin/|/xmlrpc.php|/wp-(app|cron|login|register|mail).php|wp-.*.php|/feed/|index.php|wp-comments-popup.php|wp-links-opml.php|wp-locations.php|sitemap(_index)?.xml|[a-z0-9_-]+-sitemap([0-9]+)?.xml)") {
set $cache_uri 'null cache';
}
# Don't use the cache for logged in users or recent commenters
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_logged_in") {
set $cache_uri 'null cache';
}
# Use cached or actual file if they exists, otherwise pass request to WordPress
location / {
try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?$args ;
}
}
}
EOH
destination = "local/nginx.conf"
}
}
# WordPress php-fpm backend; shares the host-mounted docroot with nginx.
task "thelook-phpfpm" {
driver = "docker"
config {
image = "wordpress:php8.3-fpm-alpine"
ports = ["fpm"]
volumes = [
"/storage/nomad/style-thelook:/var/www/html/",
"local/custom.ini:/usr/local/etc/php/conf.d/custom.ini",
]
}
resources {
cpu = 800
memory = 500
}
template {
data = <<EOH
WORDPRESS_DB_HOST={{ env "NOMAD_ADDR_db" }}
WORDPRESS_DB_USER={{ key "style/thelook/db/username" }}
WORDPRESS_DB_PASSWORD={{ key "style/thelook/db/password" }}
WORDPRESS_DB_NAME={{ key "style/thelook/db/name" }}
WORDPRESS_TABLE_PREFIX=wp_
WORDPRESS_CONFIG_EXTRA="define('WP_REDIS_HOST', '{{ env "NOMAD_ADDR_redis" }}');"
EOH
destination = "local/.env"
env = true
}
template {
data = <<EOH
pm.max_children = 10
upload_max_filesize = 64M
post_max_size = 64M
EOH
destination = "local/custom.ini"
}
}
# Registered so the style-thelook-backup job can target the DB task.
service {
name = "thelook-db"
port = "db"
}
task "thelook-db" {
driver = "docker"
config {
image = "mariadb"
ports = ["db"]
volumes = [
"/storage/nomad/style-thelook/db:/var/lib/mysql",
]
}
template {
data = <<EOH
[mysqld]
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
innodb_buffer_pool_size = 6G
innodb_log_file_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 800
}
template {
data = <<EOH
MYSQL_DATABASE={{ key "style/thelook/db/name" }}
MYSQL_USER={{ key "style/thelook/db/username" }}
MYSQL_PASSWORD={{ key "style/thelook/db/password" }}
MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
}
}
# Redis object cache, wired into WordPress via WP_REDIS_HOST above.
task "redis" {
driver = "docker"
config {
image = "redis:latest"
ports = ["redis"]
}
resources {
cpu = 200
}
}
}
}

View file

@ -1,36 +0,0 @@
# Periodic batch job that forces a redeploy of the urri-meetups service so
# its :latest image is re-pulled: it stops the job, then reverts to the
# previous job version, which triggers a fresh deployment.
job "urri-meetups-update" {
datacenters = ["aperture"]
type = "batch"
periodic {
# 6-field Nomad cron (seconds first): fires at second 0 of every 6th
# MINUTE. NOTE(review): if every 6 hours was intended, use
# "0 0 */6 * * *" — confirm.
crons = ["0 */6 * * * *"]
prohibit_overlap = true
}
group "urri-meetups-update" {
task "urri-meetups-update" {
# raw_exec so the script can call the host's nomad/jq binaries.
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
template {
data = <<EOH
#!/bin/bash

# stop the urri-meetups job
nomad job stop urri-meetups
sleep 1

# revert the urri-meetups job to the previous version
# this will trigger a new deployment, which will pull the latest image
nomad job revert urri-meetups $(($(nomad job inspect urri-meetups | jq '.Job.Version')-1))
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -1,47 +0,0 @@
# URRI meetups web app (pizza_app) at urri-meetups.rb.dcu.ie. Single Docker
# task behind Traefik; force_pull pairs with the urri-meetups-update batch
# job, which bounces this job so :latest is re-pulled.
job "urri-meetups" {
datacenters = ["aperture"]
type = "service"
group "urri-meetups" {
count = 1
network {
# App listens on 8000 inside the container; host port is dynamic.
port "http" {
to = 8000
}
}
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.urri-meetups.rule=Host(`urri-meetups.rb.dcu.ie`)",
"traefik.http.routers.urri-meetups.entrypoints=web,websecure",
"traefik.http.routers.urri-meetups.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "ghcr.io/haefae222/pizza_app:latest"
ports = ["http"]
force_pull = true
}
resources {
cpu = 1000
memory = 800
}
}
}
}

View file

@ -1,61 +0,0 @@
# Clubs & Socs room-bookings web app at rooms.rb.dcu.ie. Single Docker task
# behind Traefik; app settings are rendered from Consul KV into an env file
# that is bind-mounted into the container.
job "cands-room-bookings" {
datacenters = ["aperture"]
type = "service"
meta {
# NOTE(review): empty placeholder — presumably populated by CI at deploy
# time to pin the running commit; confirm.
git-sha = ""
}
group "clubsandsocs-room-bookings" {
count = 1
network {
# App listens on 5000 inside the container; host port is dynamic.
port "http" {
to = 5000
}
}
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.clubsandsocs-room-bookings.rule=Host(`rooms.rb.dcu.ie`)",
"traefik.http.routers.clubsandsocs-room-bookings.entrypoints=web,websecure",
"traefik.http.routers.clubsandsocs-room-bookings.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "ghcr.io/wizzdom/clubsandsocs-room-bookings:latest"
ports = ["http"]
# Re-pull :latest on every placement.
force_pull = true
# .env is mounted rather than injected (env = true is not set below).
volumes = [
"local/.env:/app/.env"
]
}
template {
data = <<EOF
UPLOAD_FOLDER=uploads
SECRET_KEY={{ key "user-projects/wizzdom/clubsandsocs-room-bookings/secret" }}
EOF
destination = "local/.env"
}
resources {
cpu = 1000
memory = 800
}
}
}
}

View file

@ -38,7 +38,7 @@ alloc_id=$(nomad job status -verbose bastion-vm | grep running | tail -n 1 | cut
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
echo "Backing up alloc id: ${alloc_id} on: ${host} to ${path}/${file}..."
ssh -i {{ key "bastion-vm/service/key" }} {{ key "bastion-vm/service/user" }}@${host} "sudo cat /opt/nomad/alloc/${alloc_id}/bastion-vm/local/bastion-vm.qcow2" > ${path}/${file}
scp -B -i {{ key "bastion-vm/service/key" }} {{ key "bastion-vm/service/user" }}@${host}:/opt/nomad/alloc/${alloc_id}/bastion-vm/local/bastion-vm.qcow2 ${path}/${file}
find ${path}/bastion-vm-* -ctime +2 -exec rm {} \; || true