Compare commits

..

24 commits

Author SHA1 Message Date
wizzdom
7b769cebfe
wiki: use utf8 encoding, improve db performance, more backups (#100)
* wiki: use utf8 encoding, improve db performance, more backups

* add medik skin colour, logo
2025-03-01 00:51:17 +00:00
Ayden
07f1f032b7
Esports: update discord bot job and add minecraft server (#101)
* socs: update esports discord bot

* esports: add minecraft server job
2025-02-28 19:58:56 +00:00
wizzdom
44ac151512
add uptime kuma (#99)
* add uptime kuma

* uptime-kuma: move to monitoring/
2025-02-28 14:34:00 +00:00
wizzdom
12278b1b44
bastion-vm-backup: remove unreliable backups over scp (#98) 2025-02-28 03:03:06 +00:00
wizzdom
737dd00e06
hedgedoc: bump image version 2025-02-28 01:01:16 +00:00
wizzdom
cfaf7a4309
mediawiki: bump db RAM 2025-02-28 00:58:55 +00:00
wizzdom
b58c812a3e
Use separate DB for all services (#95)
* migrate vaultwarden to separate db

* plausible: add separate db, move click mount

* privatebin: separate db, cleanup

* add privatebin backup job

* remove postgres job
2025-02-09 19:54:17 +00:00
wizzdom
fc337777cb
add C&S room bookings job (#93) 2025-02-06 12:47:36 +00:00
wizzdom
24911a2907
add redbrick subdomain for style (#94) 2025-02-06 12:44:47 +00:00
wizzdom
8819180c25
add uri-meetups job (#92) 2025-02-05 18:18:04 +00:00
wizzdom
ad4cfbbaf6
bump version, players, view distance (#89)
we have the RAM for it anyways ;)
2025-01-13 18:42:53 +00:00
wizzdom
d0b3c14a85
atlas: add www & www redirects (#88)
make more old links work again ;)
2025-01-04 01:03:16 +00:00
Gavin Holahan
2263558f4a
Moved The Look Online to aperture (#87)
Co-authored-by: Wizzdom <wizzdom@redbrick.dcu.ie>
2025-01-03 00:38:23 +00:00
wizzdom
14e0b7eae3
traefik: add dynamic redirects via consul kv (#85) 2024-12-29 17:59:30 +00:00
wizzdom
e951e1ba17
thecollegeview: much caches, permissions (#84)
- add `redis` object cache
- add `nginx` static page cache with WP Super Cache
- add `nginx` to `www-data` group to avoid permissions conflicts
- increase PHP `max_children`, `upload_max_filesize`, `post_max_size`
  configs
2024-12-15 18:33:44 +00:00
wizzdom
c169d75001
traefik: add ssh, voice, tracing, access log (#83)
mumble voice configs taken from here: https://github.com/DistroByte/nomad/blob/master/jobs/traefik.hcl

Co-authored-by: DistroByte <james@distrobyte.io>
2024-12-15 18:12:06 +00:00
wizzdom
b22f9d8b75
minecraft/vanilla: increase memory (#82) 2024-12-12 01:50:19 +00:00
wizzdom
9f16d94cbb
add thecollegeview.ie (#80)
* add thecollegeview.ie

* thecollegeview: migrate to phpfpm + nginx

* thecollegeview: pass rest api to phpfpm

* mps-site: remove hacky workaround for tcv

* thecollegeview: pass all dirs to phpfpm

* backup the correct db
2024-12-11 14:35:46 +00:00
wizzdom
37e6facab6
amikon: update to support node docker image (#81) 2024-12-10 23:27:40 +00:00
Ayden
f3e5ae5e2b
update db job to be tied to postgres alpine 16 (#78)
Co-authored-by: wizzdom <dom@wizzdom.xyz>
2024-12-02 22:55:48 +00:00
wizzdom
a464a915f0
hedgedoc: add mount for banner, set default permission (#76) 2024-11-24 09:31:42 +00:00
wizzdom
d38f434a13
postgres-backup: delete old backups from the correct location (#77) 2024-11-24 09:31:28 +00:00
Ayden
6ae4ea0c8f
update env variables (#75) 2024-11-18 15:30:13 +00:00
Ayden
7ae45f6cd9
update env vars for solar racing website (#74) 2024-11-18 00:10:11 +00:00
29 changed files with 1193 additions and 203 deletions

View file

@ -0,0 +1,64 @@
# Nomad job: DCU Esports Minecraft (Paper) server.
# Registers two Consul services: "esports-mc" (game port) and
# "esports-mc-rcon" (remote console, consumed by the esports discord bot).
job "esports-minecraft" {
datacenters = ["aperture"]
type = "service"
group "esports-mc" {
count = 1
network {
# Dynamically-mapped host ports; 25565 is the Minecraft protocol
# port, 25575 is the RCON port inside the container.
port "mc" {
to = 25565
}
port "rcon" {
to = 25575
}
}
service {
name = "esports-mc"
port = "mc"
}
# Separate service so RCON can be discovered independently of the game port.
service {
name = "esports-mc-rcon"
port = "rcon"
}
task "esports-minecraft" {
driver = "docker"
config {
# NOTE(review): image tag not pinned — pulls :latest on each placement;
# consider pinning for reproducible deploys.
image = "itzg/minecraft-server"
ports = ["mc", "rcon"]
volumes = [
"/storage/nomad/${NOMAD_TASK_NAME}:/data"
]
}
resources {
cpu = 5000 # 5000 MHz
memory = 20480 # 20 GB
}
# Server configuration is passed to the itzg image as environment
# variables; RCON password comes from Consul KV. MAX_MEMORY (18G) is
# deliberately below the task memory limit (20G) to leave JVM headroom.
template {
data = <<EOF
EULA = "TRUE"
TYPE = "PAPER"
VERSION = "1.21.4"
ICON = "https://liquipedia.net/commons/images/thumb/5/53/DCU_Esports_allmode.png/37px-DCU_Esports_allmode.png"
USE_AIKAR_FLAGS = true
MAX_MEMORY = 18G
MOTD = "Powered by Redbrick"
MAX_PLAYERS = "32"
VIEW_DISTANCE = "32"
ENABLE_RCON = true
RCON_PASSWORD = {{ key "games/mc/esports-mc/rcon/password" }}
# Auto-download plugins
SPIGET_RESOURCES=83581,62325,118271,28140,102931 # RHLeafDecay, GSit, GravesX, Luckperms, NoChatReport
MODRINTH_PROJECTS=datapack:no-enderman-grief,thizzyz-tree-feller,imageframe,bmarker,datapack:players-drop-heads,viaversion,viabackwards
EOF
destination = "local/.env"
env = true
}
}
}
}

View file

@ -50,20 +50,20 @@ job "minecraft-vanilla" {
resources {
cpu = 5000 # 5000 MHz
memory = 12288 # 12GB
memory = 20480 # 20 GB
}
template {
data = <<EOF
EULA = "TRUE"
TYPE = "PAPER"
VERSION = "1.21.1"
VERSION = "1.21.3"
ICON = "https://docs.redbrick.dcu.ie/assets/logo.png"
USE_AIKAR_FLAGS = true
MAX_MEMORY = 11G
MAX_MEMORY = 18G
MOTD = "LONG LIVE THE REDBRICK"
MAX_PLAYERS = "20"
VIEW_DISTANCE = "20"
MAX_PLAYERS = "32"
VIEW_DISTANCE = "32"
ENABLE_RCON = true
RCON_PASSWORD = {{ key "games/mc/vanilla-mc/rcon/password" }}
# Auto-download plugins

View file

@ -14,6 +14,9 @@ job "traefik" {
port "admin" {
static = 8080
}
port "ssh" {
static = 22
}
port "smtp" {
static = 25
}
@ -38,6 +41,12 @@ job "traefik" {
port "managesieve" {
static = 4190
}
port "voice-tcp" {
static = 4502
}
port "voice-udp" {
static = 4503
}
}
service {
@ -55,6 +64,7 @@ job "traefik" {
volumes = [
"local/traefik.toml:/etc/traefik/traefik.toml",
"/storage/nomad/traefik/acme/acme.json:/acme.json",
"/storage/nomad/traefik/access.log:/access.log",
]
}
@ -73,6 +83,9 @@ job "traefik" {
[entryPoints.traefik]
address = ":8080"
[entryPoints.ssh]
address = ":22"
[entryPoints.smtp]
address = ":25"
@ -97,6 +110,14 @@ job "traefik" {
[entryPoints.managesieve]
address = ":4190"
[entryPoints.voice-tcp]
address = ":4502"
[entryPoints.voice-udp]
address = ":4503/udp"
[entryPoints.voice-udp.udp]
timeout = "15s" # this will help reduce random dropouts in audio https://github.com/mumble-voip/mumble/issues/3550#issuecomment-441495977
[tls.options]
[tls.options.default]
minVersion = "VersionTLS12"
@ -122,6 +143,10 @@ job "traefik" {
address = "127.0.0.1:8500"
scheme = "http"
# Enable the file provider for dynamic configuration.
[providers.file]
filename = "/local/dynamic.toml"
#[providers.nomad]
# [providers.nomad.endpoint]
# address = "127.0.0.1:4646"
@ -131,9 +156,50 @@ job "traefik" {
email = "elected-admins@redbrick.dcu.ie"
storage = "acme.json"
[certificatesResolvers.lets-encrypt.acme.tlsChallenge]
[tracing]
[accessLog]
filePath = "/access.log"
EOF
destination = "/local/traefik.toml"
}
template {
data = <<EOF
[http]
[http.middlewares]
# handle redirects for short links
# NOTE: this is a consul template, add entries via consul kv
# create the middlewares with replacements for each redirect
{{ range $pair := tree "redirect/redbrick" }}
[http.middlewares.redirect-{{ trimPrefix "redirect/redbrick/" $pair.Key }}.redirectRegex]
regex = ".*" # match everything - hosts are handled by the router
replacement = "{{ $pair.Value }}"
permanent = true
{{- end }}
[http.routers]
# create routers with middlewares for each redirect
{{ range $pair := tree "redirect/redbrick" }}
[http.routers.{{ trimPrefix "redirect/redbrick/" $pair.Key }}-redirect]
rule = "Host(`{{ trimPrefix "redirect/redbrick/" $pair.Key }}.redbrick.dcu.ie`)"
entryPoints = ["web", "websecure"]
middlewares = ["redirect-{{ trimPrefix "redirect/redbrick/" $pair.Key }}"]
service = "dummy-service" # all routers need a service, this isn't used
[http.routers.{{ trimPrefix "redirect/redbrick/" $pair.Key }}-redirect.tls]
{{- end }}
[http.services]
[http.services.dummy-service.loadBalancer]
[[http.services.dummy-service.loadBalancer.servers]]
url = "http://127.0.0.1" # Dummy service - not used
EOF
destination = "local/dynamic.toml"
change_mode = "noop"
}
}
}
}

View file

@ -0,0 +1,44 @@
# Nomad job: Uptime Kuma status/monitoring page at status.redbrick.dcu.ie,
# exposed through Traefik with Let's Encrypt TLS.
job "uptime-kuma" {
datacenters = ["aperture"]
type = "service"
group "web" {
count = 1
network {
# Container listens on 3001; host port is dynamically mapped.
port "http" {
to = 3001
}
}
# NOTE(review): no explicit service name — Nomad derives one from
# job/group/task; confirm the derived name is what Traefik/Consul expect.
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.uptime-kuma.rule=Host(`status.redbrick.dcu.ie`)",
"traefik.http.routers.uptime-kuma.entrypoints=web,websecure",
"traefik.http.routers.uptime-kuma.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "louislam/uptime-kuma:1"
ports = ["http"]
# App state (monitors, history) persists on shared storage.
volumes = [
"/storage/nomad/uptime-kuma/data:/app/data"
]
}
}
}
}

View file

@ -27,12 +27,18 @@ job "atlas" {
tags = [
"traefik.enable=true",
"traefik.http.routers.nginx-atlas.rule=Host(`redbrick.dcu.ie`) || Host(`rb.dcu.ie`)",
"traefik.http.routers.nginx-atlas.rule=Host(`redbrick.dcu.ie`) || Host(`www.redbrick.dcu.ie`) || Host(`www.rb.dcu.ie`) || Host(`rb.dcu.ie`)",
"traefik.http.routers.nginx-atlas.entrypoints=web,websecure",
"traefik.http.routers.nginx-atlas.tls.certresolver=lets-encrypt",
"traefik.http.routers.nginx-atlas.middlewares=redirect-user-web",
"traefik.http.routers.nginx-atlas.middlewares=atlas-www-redirect,redirect-user-web",
# redirect redbrick.dcu.ie/~user to user.redbrick.dcu.ie
"traefik.http.middlewares.redirect-user-web.redirectregex.regex=https://redbrick\\.dcu\\.ie/~([^/]*)/?([^/].*)?",
"traefik.http.middlewares.redirect-user-web.redirectregex.replacement=https://$1.redbrick.dcu.ie/$2",
"traefik.http.middlewares.redirect-user-web.redirectregex.permanent=true",
# redirect www.redbrick.dcu.ie to redbrick.dcu.ie
"traefik.http.middlewares.atlas-www-redirect.redirectregex.regex=^https?://www.redbrick.dcu.ie/(.*)",
"traefik.http.middlewares.atlas-www-redirect.redirectregex.replacement=https://redbrick.dcu.ie/$${1}",
"traefik.http.middlewares.atlas-www-redirect.redirectregex.permanent=true",
]
}

View file

@ -42,8 +42,11 @@ job "hedgedoc" {
driver = "docker"
config {
image = "quay.io/hedgedoc/hedgedoc:1.10.0"
image = "quay.io/hedgedoc/hedgedoc:1.10.2"
ports = ["http"]
volumes = [
"/storage/nomad/hedgedoc/banner:/hedgedoc/public/banner",
]
}
template {
@ -71,7 +74,7 @@ CMD_LDAP_PROVIDERNAME = "Redbrick"
CMD_LDAP_USERIDFIELD = "uidNumber"
CMD_LDAP_USERNAMEFIELD = "uid"
CMD_SESSION_SECRET = "{{ key "hedgedoc/session/secret" }}"
CMD_DEFAULT_PERMISSION = "private"
CMD_DEFAULT_PERMISSION = "limited"
# Security/Privacy
CMD_HSTS_PRELOAD = "true"

View file

@ -7,12 +7,15 @@ job "plausible" {
port "http" {
to = 8000
}
port "db" {
port "clickhouse" {
static = 8123
}
port "db" {
static = 5432
}
}
task "plausible" {
task "app" {
service {
name = "plausible"
port = "http"
@ -35,8 +38,11 @@ job "plausible" {
driver = "docker"
config {
image = "ghcr.io/plausible/community-edition:v2.1.1"
image = "ghcr.io/plausible/community-edition:v2.1"
ports = ["http"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/plausible"
]
command = "/bin/sh"
args = ["-c", "sleep 10 && /entrypoint.sh db migrate && /entrypoint.sh run"]
@ -44,6 +50,8 @@ job "plausible" {
template {
data = <<EOH
TMPDIR=/var/lib/plausible/tmp
BASE_URL=https://plausible.redbrick.dcu.ie
SECRET_KEY_BASE={{ key "plausible/secret" }}
TOTP_VAULT_KEY={{ key "plausible/totp/key" }}
@ -57,8 +65,8 @@ GOOGLE_CLIENT_ID={{ key "plausible/google/client_id" }}
GOOGLE_CLIENT_SECRET={{ key "plausible/google/client_secret" }}
# Database settings
DATABASE_URL=postgres://{{ key "plausible/db/user" }}:{{ key "plausible/db/password" }}@postgres.service.consul:5432/{{ key "plausible/db/name" }}
CLICKHOUSE_DATABASE_URL=http://{{ env "NOMAD_ADDR_db" }}/plausible_events_db
DATABASE_URL=postgres://{{ key "plausible/db/user" }}:{{ key "plausible/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "plausible/db/name" }}
CLICKHOUSE_DATABASE_URL=http://{{ env "NOMAD_ADDR_clickhouse" }}/plausible_events_db
# Email settings
MAILER_NAME="Redbrick Plausible"
@ -80,24 +88,43 @@ EOH
}
}
task "clickhouse" {
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "plausible/db/password" }}
POSTGRES_USER={{ key "plausible/db/user" }}
POSTGRES_NAME={{ key "plausible/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
task "clickhouse" {
service {
name = "plausible-clickhouse"
port = "db"
port = "clickhouse"
}
driver = "docker"
config {
image = "clickhouse/clickhouse-server:24.3.3.102-alpine"
ports = ["db"]
ports = ["clickhouse"]
volumes = [
"/opt/plausible/clickhouse:/var/lib/clickhouse",
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/clickhouse",
"local/clickhouse.xml:/etc/clickhouse-server/config.d/logging.xml:ro",
"local/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro"
]
@ -140,7 +167,7 @@ EOH
}
resources {
memory = 800
memory = 1000
}
}
}

View file

@ -1,96 +0,0 @@
# Nomad job: shared PostgreSQL instance (removed by this change set —
# commit "Use separate DB for all services (#95)" replaces it with
# per-service postgres tasks).
job "postgres" {
datacenters = ["aperture"]
# Pinned to one host because data lives on that host's local disk
# (/opt/postgres below).
constraint {
attribute = "${attr.unique.hostname}"
value = "wheatley"
}
group "db" {
network {
# Static port so other jobs can reach it at <host>:5432 via Consul.
port "db" {
static = 5432
}
}
task "postgres-db" {
driver = "docker"
# Root credentials injected from Consul KV as environment variables.
template {
data = <<EOH
POSTGRES_PASSWORD="{{ key "postgres/password/root" }}"
POSTGRES_USER="{{ key "postgres/username/root" }}"
EOH
destination = "local/file.env"
env = true
}
config {
# NOTE(review): ":latest" tag — a node re-pull could silently jump
# major versions and break the data directory.
image = "postgres:latest"
ports = ["db"]
volumes = [
"/opt/postgres:/var/lib/postgresql/data",
"local/postgresql.conf:/etc/postgres/postgresql.conf",
"local/pg_hba.conf:/pg_hba.conf",
]
}
# Tuned postgresql.conf rendered into the container.
template {
data = <<EOH
max_connections = 100
shared_buffers = 2GB
effective_cache_size = 6GB
maintenance_work_mem = 512MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 5242kB
min_wal_size = 1GB
max_wal_size = 4GB
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2
hba_file = "/pg_hba.conf"
EOH
destination = "local/postgresql.conf"
}
# Host-based auth: trust local/loopback, scram-sha-256 for everything else.
template {
data = <<EOH
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all all scram-sha-256
EOH
destination = "local/pg_hba.conf"
}
resources {
cpu = 400
memory = 800
}
service {
name = "postgres"
port = "db"
check {
type = "tcp"
interval = "2s"
timeout = "2s"
}
}
}
}
}

View file

@ -1,4 +1,4 @@
job "postgres-backup" {
job "privatebin-backup" {
datacenters = ["aperture"]
type = "batch"
@ -20,17 +20,17 @@ job "postgres-backup" {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/postgres/postgres-$(date +%Y-%m-%d_%H-%M-%S).sql
file=/storage/backups/nomad/privatebin/postgresql-privatebin-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/postgres
mkdir -p /storage/backups/nomad/privatebin
alloc_id=$(nomad job status postgres | grep running | tail -n 1 | cut -d " " -f 1)
alloc_id=$(nomad job status privatebin | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec $alloc_id pg_dumpall -U {{ key "postgres/username/root" }} > "${file}"
nomad alloc exec -task db $alloc_id pg_dumpall -U {{ key "privatebin/db/user" }} > "${file}"
find /storage/backups/nomad/postgres/hedgedoc/postgres* -ctime +3 -exec rm {} \; || true
find /storage/backups/nomad/privatebin/postgresql-privatebin* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"

View file

@ -10,6 +10,9 @@ job "privatebin" {
port "http" {
to = 8080
}
port "db" {
to = 5432
}
}
service {
@ -25,7 +28,7 @@ job "privatebin" {
tags = [
"traefik.enable=true",
"traefik.http.routers.privatebin.rule=Host(`paste.rb.dcu.ie`) || Host(`paste.redbrick.dcu.ie`)",
"traefik.http.routers.privatebin.rule=Host(`paste.redbrick.dcu.ie`) || Host(`paste.rb.dcu.ie`)",
"traefik.http.routers.privatebin.entrypoints=web,websecure",
"traefik.http.routers.privatebin.tls.certresolver=lets-encrypt",
]
@ -42,15 +45,10 @@ job "privatebin" {
"local/conf.php:/srv/data/conf.php",
]
}
template {
destination = "local/.env"
env = true
change_mode = "restart"
data = <<EOH
TZ=Europe/Dublin
PHP_TZ=Europe/Dublin
CONFIG_PATH=/srv/data/
EOH
env {
TZ = "Europe/Dublin"
PHP_TZ = "Europe/Dublin"
CONFIG_PATH = "/srv/data/"
}
template {
@ -59,7 +57,7 @@ EOH
[main]
name = "Redbrick PasteBin"
basepath = "https://paste.rb.dcu.ie/"
basepath = "https://paste.redbrick.dcu.ie/"
discussion = true
@ -185,13 +183,36 @@ batchsize = 10
[model]
class = Database
[model_options]
dsn = "pgsql:host=postgres.service.consul;dbname={{ key "privatebin/db/name" }}"
tbl = "privatebin_" ; table prefix
dsn = "pgsql:host={{ env "NOMAD_ADDR_db" }};dbname={{ key "privatebin/db/name" }}"
tbl = "{{ key "privatebin/db/name" }}" ; table prefix
usr = "{{ key "privatebin/db/user" }}"
pwd = "{{ key "privatebin/db/password" }}"
opt[12] = true ; PDO::ATTR_PERSISTENT ; use persistent connections - default
EOH
}
}
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "privatebin/db/password" }}
POSTGRES_USER={{ key "privatebin/db/user" }}
POSTGRES_NAME={{ key "privatebin/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -0,0 +1,50 @@
# Nomad batch job: periodic pg_dumpall of the vaultwarden job's "db" task,
# written to shared backup storage; posts a Discord webhook alert on failure.
job "vaultwarden-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# NOTE(review): 6-field cronexpr — "0 */3 * * * *" parses as sec=0,
# min=*/3, i.e. every 3 minutes, not every 3 hours; confirm intent.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "postgres-backup" {
# raw_exec: runs directly on the host so it can use the host's
# `nomad` CLI and write to /storage/backups.
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
# Backup script: finds a running vaultwarden allocation, execs
# pg_dumpall in its "db" task, prunes dumps older than 3 days, and
# alerts Discord if the dump file is empty/missing.
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/vaultwarden/postgresql-vaultwarden-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/vaultwarden
alloc_id=$(nomad job status vaultwarden | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task db $alloc_id pg_dumpall -U {{ key "vaultwarden/db/user" }} > "${file}"
find /storage/backups/nomad/vaultwarden/postgresql-vaultwarden* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "postgres/webhook/discord" }}
fi
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -9,6 +9,9 @@ job "vaultwarden" {
port "http" {
to = 80
}
port "db" {
to = 5432
}
}
service {
@ -31,14 +34,15 @@ job "vaultwarden" {
ports = ["http"]
volumes = [
"/storage/nomad/vaultwarden:/data"
"/storage/nomad/${NOMAD_JOB_NAME}:/data",
"/etc/localtime:/etc/localtime:ro"
]
}
template {
data = <<EOF
DOMAIN=https://vault.redbrick.dcu.ie
DATABASE_URL=postgresql://{{ key "vaultwarden/db/user" }}:{{ key "vaultwarden/db/password" }}@postgres.service.consul:5432/{{ key "vaultwarden/db/name" }}
DATABASE_URL=postgresql://{{ key "vaultwarden/db/user" }}:{{ key "vaultwarden/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "vaultwarden/db/name" }}
SIGNUPS_ALLOWED=false
INVITATIONS_ALLOWED=true
@ -55,14 +59,37 @@ EOF
destination = "local/env"
env = true
}
# These yubico variables are not necessary for yubikey support, only to verify the keys with yubico.
#YUBICO_CLIENT_ID={{ key "vaultwarden/yubico/client_id" }}
#YUBICO_SECRET_KEY={{ key "vaultwarden/yubico/secret_key" }}
# These yubico variables are not necessary for yubikey support, only to verify the keys with yubico.
#YUBICO_CLIENT_ID={{ key "vaultwarden/yubico/client_id" }}
#YUBICO_SECRET_KEY={{ key "vaultwarden/yubico/secret_key" }}
resources {
cpu = 500
memory = 500
}
}
task "db" {
driver = "docker"
config {
image = "postgres:17-alpine"
ports = ["db"]
volumes = [
"/storage/nomad/${NOMAD_JOB_NAME}/${NOMAD_TASK_NAME}:/var/lib/postgresql/data",
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "vaultwarden/db/password" }}
POSTGRES_USER={{ key "vaultwarden/db/user" }}
POSTGRES_NAME={{ key "vaultwarden/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

View file

@ -41,7 +41,7 @@ $wgDBpassword = "{{ key "mediawiki/db/password" }}";
# MySQL specific settings
$wgDBprefix = "rbwiki_";
# MySQL table options to use during installation or update
$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
$wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=utf8mb4";
## Shared memory settings
$wgMainCacheType = CACHE_NONE;
@ -89,11 +89,15 @@ wfLoadSkin( 'Vector' );
wfLoadSkin( 'Citizen' );
wfLoadSkin( 'Timeless' );
wfLoadSkin( 'MinervaNeue' );
wfLoadSkin( 'Medik' );
$wgCitizenThemeColor = "#a81e22";
$wgCitizenShowPageTools = "permission";
$wgCitizenSearchDescriptionSource = "pagedescription";
$wgMedikColor = "#a81e22";
$wgMedikShowLogo = "main";
$wgLocalisationUpdateDirectory = "$IP/cache";
# load extensions

View file

@ -17,7 +17,7 @@ job "mediawiki-backup" {
}
template {
data = <<EOH
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/wiki/mysql/rbwiki-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql
@ -30,7 +30,7 @@ job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task rbwiki-db $alloc_id mariadb-dump -u {{ key "mediawiki/db/username" }} -p'{{ key "mediawiki/db/password"}}' {{ key "mediawiki/db/name" }} > "${file}"
find /storage/backups/nomad/wiki/mysql/rbwiki-mysql* -ctime +3 -exec rm {} \; || true
find /storage/backups/nomad/wiki/mysql/rbwiki-mysql* -ctime +30 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
@ -56,7 +56,7 @@ EOH
}
template {
data = <<EOH
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/wiki/xml/rbwiki-dump-$(date +%Y-%m-%d_%H-%M-%S).xml

View file

@ -1,6 +1,6 @@
job "mediawiki" {
datacenters = ["aperture"]
type = "service"
type = "service"
meta {
domain = "wiki.redbrick.dcu.ie"
@ -27,10 +27,10 @@ job "mediawiki" {
port = "http"
check {
type = "http"
path = "/Main_Page"
type = "http"
path = "/Main_Page"
interval = "10s"
timeout = "5s"
timeout = "5s"
}
tags = [
@ -61,11 +61,11 @@ job "mediawiki" {
]
}
resources {
cpu = 200
memory = 100
}
cpu = 200
memory = 100
}
template {
data = <<EOH
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
@ -139,9 +139,9 @@ EOH
}
resources {
cpu = 4000
memory = 1200
}
cpu = 4000
memory = 1200
}
template {
data = <<EOH
@ -179,7 +179,7 @@ EOH
}
template {
data = file("LocalSettings.php")
data = file("LocalSettings.php")
destination = "local/LocalSettings.php"
}
}
@ -189,10 +189,10 @@ EOH
port = "db"
check {
name = "mariadb_probe"
type = "tcp"
name = "mariadb_probe"
type = "tcp"
interval = "10s"
timeout = "2s"
timeout = "2s"
}
}
@ -213,6 +213,18 @@ EOH
template {
data = <<EOH
[mysqld]
# Ensure full UTF-8 support
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
skip-character-set-client-handshake
# Fix 1000-byte key length issue
innodb_large_prefix = 1
innodb_file_format = Barracuda
innodb_file_per_table = 1
innodb_default_row_format = dynamic
# Performance optimizations (Keep these based on your system)
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
@ -224,22 +236,23 @@ innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
# Network
bind-address = 0.0.0.0
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 1200
}
cpu = 800
memory = 2500
}
template {
data = <<EOH
@ -250,7 +263,7 @@ MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
env = true
}
}
}

View file

@ -12,7 +12,7 @@ job "ams-amikon" {
network {
port "http" {
to = 80
to = 3000
}
}
@ -20,35 +20,45 @@ job "ams-amikon" {
port = "http"
check {
type = "http"
path = "/"
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.ams-amikon.rule=Host(`amikon.me`) || Host(`www.amikon.me`)",
"traefik.http.routers.ams-amikon.entrypoints=web,websecure",
"traefik.http.routers.ams-amikon.tls.certresolver=lets-encrypt",
"traefik.http.routers.ams-amikon.middlewares=www-redirect",
"traefik.http.middlewares.www-redirect.redirectregex.regex=^https?://www.amikon.me/(.*)",
"traefik.http.middlewares.www-redirect.redirectregex.replacement=https://amikon.me/$${1}",
"traefik.http.middlewares.www-redirect.redirectregex.permanent=true",
"traefik.http.routers.ams-amikon.middlewares=amikon-www-redirect",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.regex=^https?://www.amikon.me/(.*)",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.replacement=https://amikon.me/$${1}",
"traefik.http.middlewares.amikon-www-redirect.redirectregex.permanent=true",
]
}
task "amikon-nginx" {
task "amikon-node" {
driver = "docker"
config {
image = "ghcr.io/dcuams/amikon-site-v2:latest"
image = "ghcr.io/dcuams/amikon-site-v2:latest"
force_pull = true
ports = ["http"]
ports = ["http"]
}
template {
data = <<EOF
EMAIL={{ key "ams/amikon/email/user" }}
EMAIL_PASS={{ key "ams/amikon/email/password" }}
TO_EMAIL={{ key "ams/amikon/email/to" }}
EOF
destination = ".env"
env = true
}
resources {
cpu = 100
memory = 50
cpu = 800
memory = 500
}
}
}

View file

@ -126,7 +126,7 @@ EOH
driver = "docker"
config {
image = "postgres:alpine"
image = "postgres:16-alpine"
ports = ["db"]
volumes = [

View file

@ -53,6 +53,11 @@ DOCKER_PASS={{ key "dcusr/ghcr/password" }}
TO_EMAIL={{ key "dcusr/nodemailer/to" }}
EMAIL={{ key "dcusr/nodemailer/from" }}
EMAIL_PASS={{ key "dcusr/nodemailer/password" }}
LISTMONK_ENDPOINT={{ key "dcusr/listmonk/endpoint" }}
LISTMONK_USERNAME={{ key "dcusr/listmonk/username" }}
LISTMONK_PASSWORD={{ key "dcusr/listmonk/password" }}
LISTMONK_LIST_IDS={{ key "dcusr/listmonk/list/id" }}
RECAPTCHA_SECRET_KEY={{ key "dcusr/recaptcha/secret/key" }}
EOH
}
}

View file

@ -10,6 +10,7 @@ job "esports-discord-bot" {
config {
image = "ghcr.io/aydenjahola/discord-multipurpose-bot:main"
force_pull = true
}
resources {
@ -28,6 +29,15 @@ RAPIDAPI_KEY={{ key "socs/esports/bot/rapidapi/key" }}
TRACKER_API_KEY={{ key "socs/esports/bot/trackerapi/key" }}
TRACKER_API_URL={{ key "socs/esports/bot/trackerapi/url" }}
WORDNIK_API_KEY={{key "socs/esports/bot/wordnikapi/key" }}
HUGGING_FACE_API_KEY={{ key "socs/esports/bot/huggingface/key" }}
RCON_HOST=esports-mc-rcon.service.consul
# https://discuss.hashicorp.com/t/passing-registered-ip-and-port-from-consul-to-env-nomad-job-section/35647
{{ range service "esports-mc-rcon" }}
RCON_PORT={{ .Port }}{{ end }}
RCON_PASSWORD={{ key "games/mc/esports-mc/rcon/password" }}
EOH
destination = "local/.env"
env = true

View file

@ -46,9 +46,6 @@ job "mps-site" {
username = "${DOCKER_USER}"
password = "${DOCKER_PASS}"
}
volumes = [
"local/hosts:/etc/hosts",
]
}
template {
@ -60,23 +57,6 @@ EOH
env = true
}
template {
data = <<EOF
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.17 {{ env "NOMAD_TASK_NAME" }}
# use internal IP for thecollegeview.ie as external IP isn't routable
192.168.0.158 thecollegeview.ie
192.168.0.158 www.thecollegeview.ie
EOF
destination = "local/hosts"
}
resources {
cpu = 300
memory = 500

View file

@ -0,0 +1,49 @@
# Nomad batch job: periodic mariadb-dump of the mps-thecollegeview "tcv-db"
# task to shared backup storage; posts a Discord webhook alert on failure.
job "mps-thecollegeview-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# NOTE(review): 6-field cronexpr — "0 */3 * * * *" parses as sec=0,
# min=*/3, i.e. every 3 minutes, not every 3 hours; confirm intent.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "mysql-backup" {
# raw_exec: runs on the host to use the host `nomad` CLI and the
# shared /storage/backups mount.
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/mysql-backup.sh"]
}
# Dump script: locates a running mps-thecollegeview allocation, execs
# mariadb-dump in its "tcv-db" task, prunes dumps older than 3 days,
# and alerts Discord when the dump is empty/missing.
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/mps-thecollegeview/mysql/tcv-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/mps-thecollegeview/mysql
alloc_id=$(nomad job status mps-thecollegeview | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task tcv-db $alloc_id mariadb-dump -u {{ key "mps/thecollegeview/db/username" }} -p'{{ key "mps/thecollegeview/db/password"}}' {{ key "mps/thecollegeview/db/name" }} > "${file}"
find /storage/backups/nomad/mps-thecollegeview/mysql/tcv-mysql* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "# <@&585512338728419341> `MySQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "mysql/webhook/discord" }}
fi
EOH
destination = "local/mysql-backup.sh"
}
}
}
}

View file

@ -0,0 +1,257 @@
job "mps-thecollegeview" {
datacenters = ["aperture"]
type = "service"
meta {
domain = "thecollegeview.ie"
}
group "tcv" {
count = 1
network {
mode = "bridge"
port "http" {
to = 80
}
port "fpm" {
to = 9000
}
port "db" {
to = 3306
}
port "redis" {
to = 6379
}
}
service {
name = "tcv-web"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "5s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.tcv.rule=Host(`${NOMAD_META_domain}`)",
"traefik.http.routers.tcv.entrypoints=web,websecure",
"traefik.http.routers.tcv.tls.certresolver=lets-encrypt",
]
}
task "tcv-nginx" {
driver = "docker"
config {
image = "nginx:alpine"
ports = ["http"]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"/storage/nomad/mps-thecollegeview:/var/www/html/",
]
group_add = [82] # www-data in alpine
}
resources {
cpu = 200
memory = 100
}
template {
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
server_tokens off;
error_log /dev/stderr error;
access_log /dev/stdout;
charset utf-8;
server {
server_name {{ env "NOMAD_META_domain" }};
listen 80;
listen [::]:80;
root /var/www/html;
index index.php index.html index.htm;
client_max_body_size 5m;
client_body_timeout 60;
# NOTE: Not used here, WP super cache rule used instead
# # Pass all folders to FPM
# location / {
# try_files $uri $uri/ /index.php?$args;
# }
# Pass the PHP scripts to FastCGI server
location ~ \.php$ {
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass {{ env "NOMAD_ADDR_fpm" }};
fastcgi_index index.php;
}
location ~ /\.ht {
deny all;
}
# WP Super Cache rules.
set $cache_uri $request_uri;
# POST requests and urls with a query string should always go to PHP
if ($request_method = POST) {
set $cache_uri 'null cache';
}
if ($query_string != "") {
set $cache_uri 'null cache';
}
# Don't cache uris containing the following segments
if ($request_uri ~* "(/wp-admin/|/xmlrpc.php|/wp-(app|cron|login|register|mail).php|wp-.*.php|/feed/|index.php|wp-comments-popup.php|wp-links-opml.php|wp-locations.php|sitemap(_index)?.xml|[a-z0-9_-]+-sitemap([0-9]+)?.xml)") {
set $cache_uri 'null cache';
}
# Don't use the cache for logged in users or recent commenters
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_logged_in") {
set $cache_uri 'null cache';
}
# Use cached or actual file if they exists, otherwise pass request to WordPress
location / {
try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?$args ;
}
}
}
EOH
destination = "local/nginx.conf"
}
}
task "tcv-phpfpm" {
driver = "docker"
config {
image = "wordpress:php8.3-fpm-alpine"
ports = ["fpm"]
volumes = [
"/storage/nomad/mps-thecollegeview:/var/www/html/",
"local/custom.ini:/usr/local/etc/php/conf.d/custom.ini",
]
}
resources {
cpu = 800
memory = 500
}
template {
data = <<EOH
WORDPRESS_DB_HOST={{ env "NOMAD_ADDR_db" }}
WORDPRESS_DB_USER={{ key "mps/thecollegeview/db/username" }}
WORDPRESS_DB_PASSWORD={{ key "mps/thecollegeview/db/password" }}
WORDPRESS_DB_NAME={{ key "mps/thecollegeview/db/name" }}
WORDPRESS_TABLE_PREFIX=wp_2
WORDPRESS_CONFIG_EXTRA="define('WP_REDIS_HOST', '{{ env "NOMAD_ADDR_redis" }}');"
EOH
destination = "local/.env"
env = true
}
template {
data = <<EOH
pm.max_children = 10
upload_max_filesize = 64M
post_max_size = 64M
EOH
destination = "local/custom.ini"
}
}
# Service registration for the database so sibling tasks can reach it
# (WORDPRESS_DB_HOST uses NOMAD_ADDR_db from the shared network).
service {
name = "tcv-db"
port = "db"
}
task "tcv-db" {
driver = "docker"
config {
image = "mariadb"
ports = ["db"]
volumes = [
"/storage/nomad/mps-thecollegeview/db:/var/lib/mysql",
]
}
template {
data = <<EOH
[mysqld]
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
innodb_buffer_pool_size = 6G
innodb_log_file_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 800
}
template {
data = <<EOH
MYSQL_DATABASE={{ key "mps/thecollegeview/db/name" }}
MYSQL_USER={{ key "mps/thecollegeview/db/username" }}
MYSQL_PASSWORD={{ key "mps/thecollegeview/db/password" }}
MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
}
}
task "redis" {
driver = "docker"
config {
image = "redis:latest"
ports = ["redis"]
}
resources {
cpu = 200
}
}
}
}

View file

@ -0,0 +1,49 @@
job "style-thelook-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "mysql-backup" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/mysql-backup.sh"]
}
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/style-thelook/mysql/thelook-mysql-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/style-thelook/mysql
alloc_id=$(nomad job status style-thelook | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task thelook-db $alloc_id mariadb-dump -u {{ key "style/thelook/db/username" }} -p'{{ key "style/thelook/db/password"}}' {{ key "style/thelook/db/name" }} > "${file}"
find /storage/backups/nomad/style-thelook/mysql/thelook-mysql* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "# <@&585512338728419341> `MySQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "mysql/webhook/discord" }}
fi
EOH
destination = "local/mysql-backup.sh"
}
}
}
}

257
jobs/socs/style-thelook.hcl Normal file
View file

@ -0,0 +1,257 @@
job "style-thelook" {
datacenters = ["aperture"]
type = "service"
meta {
domain = "thelookonline.dcu.ie"
}
group "thelook" {
count = 1
network {
mode = "bridge"
port "http" {
to = 80
}
port "fpm" {
to = 9000
}
port "db" {
to = 3306
}
port "redis" {
to = 6379
}
}
service {
name = "thelook-web"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "5s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.thelook.rule=Host(`${NOMAD_META_domain}`) || Host(`style.redbrick.dcu.ie`)",
"traefik.http.routers.thelook.entrypoints=web,websecure",
"traefik.http.routers.thelook.tls.certresolver=lets-encrypt",
]
}
task "thelook-nginx" {
driver = "docker"
config {
image = "nginx:alpine"
ports = ["http"]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"/storage/nomad/style-thelook:/var/www/html/",
]
group_add = [82] # www-data in alpine
}
resources {
cpu = 200
memory = 100
}
template {
data = <<EOH
# user www-data www-data;
error_log /dev/stderr error;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
server_tokens off;
error_log /dev/stderr error;
access_log /dev/stdout;
charset utf-8;
server {
server_name {{ env "NOMAD_META_domain" }};
listen 80;
listen [::]:80;
root /var/www/html;
index index.php index.html index.htm;
client_max_body_size 5m;
client_body_timeout 60;
# NOTE: Not used here, WP super cache rule used instead
# Pass all folders to FPM
# location / {
# try_files $uri $uri/ /index.php?$args;
# }
# Pass the PHP scripts to FastCGI server
location ~ \.php$ {
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_pass {{ env "NOMAD_ADDR_fpm" }};
fastcgi_index index.php;
}
location ~ /\.ht {
deny all;
}
# WP Super Cache rules.
set $cache_uri $request_uri;
# POST requests and urls with a query string should always go to PHP
if ($request_method = POST) {
set $cache_uri 'null cache';
}
if ($query_string != "") {
set $cache_uri 'null cache';
}
# Don't cache uris containing the following segments
if ($request_uri ~* "(/wp-admin/|/xmlrpc.php|/wp-(app|cron|login|register|mail).php|wp-.*.php|/feed/|index.php|wp-comments-popup.php|wp-links-opml.php|wp-locations.php|sitemap(_index)?.xml|[a-z0-9_-]+-sitemap([0-9]+)?.xml)") {
set $cache_uri 'null cache';
}
# Don't use the cache for logged in users or recent commenters
if ($http_cookie ~* "comment_author|wordpress_[a-f0-9]+|wp-postpass|wordpress_logged_in") {
set $cache_uri 'null cache';
}
# Use cached or actual file if they exists, otherwise pass request to WordPress
location / {
try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?$args ;
}
}
}
EOH
destination = "local/nginx.conf"
}
}
task "thelook-phpfpm" {
driver = "docker"
config {
image = "wordpress:php8.3-fpm-alpine"
ports = ["fpm"]
volumes = [
"/storage/nomad/style-thelook:/var/www/html/",
"local/custom.ini:/usr/local/etc/php/conf.d/custom.ini",
]
}
resources {
cpu = 800
memory = 500
}
template {
data = <<EOH
WORDPRESS_DB_HOST={{ env "NOMAD_ADDR_db" }}
WORDPRESS_DB_USER={{ key "style/thelook/db/username" }}
WORDPRESS_DB_PASSWORD={{ key "style/thelook/db/password" }}
WORDPRESS_DB_NAME={{ key "style/thelook/db/name" }}
WORDPRESS_TABLE_PREFIX=wp_
WORDPRESS_CONFIG_EXTRA="define('WP_REDIS_HOST', '{{ env "NOMAD_ADDR_redis" }}');"
EOH
destination = "local/.env"
env = true
}
template {
data = <<EOH
pm.max_children = 10
upload_max_filesize = 64M
post_max_size = 64M
EOH
destination = "local/custom.ini"
}
}
service {
name = "thelook-db"
port = "db"
}
task "thelook-db" {
driver = "docker"
config {
image = "mariadb"
ports = ["db"]
volumes = [
"/storage/nomad/style-thelook/db:/var/lib/mysql",
]
}
template {
data = <<EOH
[mysqld]
max_connections = 100
key_buffer_size = 2G
query_cache_size = 0
innodb_buffer_pool_size = 6G
innodb_log_file_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_flush_method = O_DIRECT
innodb_io_capacity = 200
tmp_table_size = 5242K
max_heap_table_size = 5242K
innodb_log_buffer_size = 16M
innodb_file_per_table = 1
bind-address = 0.0.0.0
# Logging
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 1
EOH
destination = "local/conf.cnf"
}
resources {
cpu = 800
memory = 800
}
template {
data = <<EOH
MYSQL_DATABASE={{ key "style/thelook/db/name" }}
MYSQL_USER={{ key "style/thelook/db/username" }}
MYSQL_PASSWORD={{ key "style/thelook/db/password" }}
MYSQL_RANDOM_ROOT_PASSWORD=yes
EOH
destination = "local/.env"
env = true
}
}
task "redis" {
driver = "docker"
config {
image = "redis:latest"
ports = ["redis"]
}
resources {
cpu = 200
}
}
}
}

View file

@ -0,0 +1,36 @@
job "urri-meetups-update" {
datacenters = ["aperture"]
type = "batch"
periodic {
crons = ["0 */6 * * * *"]
prohibit_overlap = true
}
group "urri-meetups-update" {
task "urri-meetups-update" {
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
template {
data = <<EOH
#!/bin/bash
# stop the urri-meetups job
nomad job stop urri-meetups
sleep 1
# revert the urri-meetups job to the previous version
# this will trigger a new deployment, which will pull the latest image
nomad job revert urri-meetups $(($(nomad job inspect urri-meetups | jq '.Job.Version')-1))
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -0,0 +1,47 @@
job "urri-meetups" {
datacenters = ["aperture"]
type = "service"
group "urri-meetups" {
count = 1
network {
port "http" {
to = 8000
}
}
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.urri-meetups.rule=Host(`urri-meetups.rb.dcu.ie`)",
"traefik.http.routers.urri-meetups.entrypoints=web,websecure",
"traefik.http.routers.urri-meetups.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "ghcr.io/haefae222/pizza_app:latest"
ports = ["http"]
force_pull = true
}
resources {
cpu = 1000
memory = 800
}
}
}
}

View file

@ -0,0 +1,61 @@
job "cands-room-bookings" {
datacenters = ["aperture"]
type = "service"
meta {
git-sha = ""
}
group "clubsandsocs-room-bookings" {
count = 1
network {
port "http" {
to = 5000
}
}
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.clubsandsocs-room-bookings.rule=Host(`rooms.rb.dcu.ie`)",
"traefik.http.routers.clubsandsocs-room-bookings.entrypoints=web,websecure",
"traefik.http.routers.clubsandsocs-room-bookings.tls.certresolver=lets-encrypt",
]
}
task "web" {
driver = "docker"
config {
image = "ghcr.io/wizzdom/clubsandsocs-room-bookings:latest"
ports = ["http"]
force_pull = true
volumes = [
"local/.env:/app/.env"
]
}
template {
data = <<EOF
UPLOAD_FOLDER=uploads
SECRET_KEY={{ key "user-projects/wizzdom/clubsandsocs-room-bookings/secret" }}
EOF
destination = "local/.env"
}
resources {
cpu = 1000
memory = 800
}
}
}
}

View file

@ -38,7 +38,7 @@ alloc_id=$(nomad job status -verbose bastion-vm | grep running | tail -n 1 | cut
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
echo "Backing up alloc id: ${alloc_id} on: ${host} to ${path}/${file}..."
scp -B -i {{ key "bastion-vm/service/key" }} {{ key "bastion-vm/service/user" }}@${host}:/opt/nomad/alloc/${alloc_id}/bastion-vm/local/bastion-vm.qcow2 ${path}/${file}
ssh -i {{ key "bastion-vm/service/key" }} {{ key "bastion-vm/service/user" }}@${host} "sudo cat /opt/nomad/alloc/${alloc_id}/bastion-vm/local/bastion-vm.qcow2" > ${path}/${file}
find ${path}/bastion-vm-* -ctime +2 -exec rm {} \; || true

View file

@ -21,7 +21,7 @@ job "blockbot" {
template {
data = <<EOF
TOKEN={{ key "blockbot/discord/token" }}
DEBUG= # empty means false
DEBUG=false
EOF
destination = "local/.env"
env = true