Merge branch 'master' into update-bastion

This commit is contained in:
wizzdom 2024-04-04 05:12:27 +01:00 committed by GitHub
commit ede914d199
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 1190 additions and 73 deletions

View file

@ -6,7 +6,7 @@
- nfs-common
when: ansible_os_family == "Debian"
- name: create mount point
- name: create /storage mount point
become: true
ansible.builtin.file:
path: /storage
@ -14,6 +14,14 @@
mode: "0755"
when: ansible_os_family == "Debian"
- name: create /oldstorage mount directory
become: true
ansible.builtin.file:
path: /oldstorage
state: directory
mode: "0755"
when: ansible_os_family == "Debian"
- name: add nfs entry to fstab
become: true
ansible.builtin.lineinfile:
@ -23,6 +31,7 @@
create: yes
with_items:
- "10.10.0.7:/storage /storage nfs defaults 0 0"
- "192.168.0.150:/zbackup /oldstorage nfs defaults 0 0"
- name: mount nfs
become: true

View file

@ -1,6 +1,6 @@
#!/bin/bash
sudo chown -R root:nomad ./jobs
sudo chown -R root:nomad ./
sudo find . -type d -exec chmod 775 {} \;

View file

@ -31,16 +31,16 @@ job "minecraft" {
resources {
cpu = 3000 # 3000 MHz
memory = 8192 # 8gb
memory = 8192 # 8GB
}
env {
EULA = "TRUE"
TYPE = "PAPER"
ICON = "https://docs.redbrick.dcu.ie/assets/logo.png"
MEMORY = "6G"
USE_AIKAR_FLAGS=true
MOTD = "LONG LIVE THE REDBRICK"
MAX_PLAYERS = "20"
}
}
}
@ -76,111 +76,63 @@ job "minecraft" {
}
resources {
cpu = 7000 # 7000 MHz
memory = 17408 # 17GB
cpu = 3000 # 3000 MHz
memory = 8192 # 8GB
}
env {
EULA = "TRUE"
TYPE = "PURPUR"
VERSION = "1.20.1"
MOTD = "DCU Games Minecraft Server"
MOTD = "DCU Games Soc Minecraft Server"
USE_AIKAR_FLAGS=true
OPS = ""
MAX_PLAYERS = "20"
}
}
}
group "fugitives-mc" {
group "olim909-mc" {
count = 1
network {
port "mc-fugitives-port" {
static = 25566
to = 25565
}
port "mc-fugitives-rcon" {
to = 25575
}
}
service {
name = "fugitives-mc"
}
task "minecraft-fugitives" {
driver = "docker"
config {
image = "itzg/minecraft-server"
ports = ["mc-fugitives-port","mc-fugitives-rcon"]
}
resources {
cpu = 3000 # 3000 MHz
memory = 8168 # 8gb
}
env {
EULA = "TRUE"
MEMORY = "6G"
USE_AIKAR_FLAGS=true
}
}
}
group "shemek-mc" {
count = 1
network {
port "mc-shemek-port" {
port "mc-olim909-port" {
static = 25568
to = 25565
}
port "mc-shemek-rcon" {
port "mc-olim909-rcon" {
to = 25575
}
}
service {
name = "shemek-mc"
name = "olim909-mc"
}
task "minecraft-shemek" {
task "minecraft-olim909" {
driver = "docker"
config {
image = "itzg/minecraft-server"
ports = ["mc-shemek-port","mc-shemek-rcon"]
ports = ["mc-olim909-port","mc-olim909-rcon"]
volumes = [
"/storage/nomad/${NOMAD_TASK_NAME}:/data"
]
}
resources {
cpu = 7000 # 7000 MHz
memory = 17408 # 17GB
cpu = 3000 # 3000 MHz
memory = 4096 # 4GB
}
env {
EULA = "TRUE"
TYPE = "FORGE"
VERSION = "1.20.1"
FORGE_INSTALLER = "forge-1.20.1-47.2.19-installer.jar"
OVERRIDE_SERVER_PROPERTIES = "TRUE"
JVM_XX_OPTS = "-Xms12G -Xmx16G -XX:+UseG1GC -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=200 -XX:+UnlockExperimentalVMOptions -XX:+DisableExplicitGC -XX:+AlwaysPreTouch -XX:G1NewSizePercent=30 -XX:G1MaxNewSizePercent=40 -XX:G1HeapRegionSize=8M -XX:G1ReservePercent=20 -XX:G1HeapWastePercent=5 -XX:G1MixedGCCountTarget=4 -XX:InitiatingHeapOccupancyPercent=15 -XX:G1MixedGCLiveThresholdPercent=90 -XX:G1RSetUpdatingPauseTimePercent=5 -XX:SurvivorRatio=32 -XX:+PerfDisableSharedMem -XX:MaxTenuringThreshold=1"
TYPE = "PAPER"
VERSION = "1.20.4"
USE_AIKAR_FLAGS=true
MAX_MEMORY = "16G"
OPS = "Olim909"
MAX_PLAYERS = "5"
MOTD = "Minecraft ATM 9"
DIFFICULTY = "normal"
SPAWN_PROTECTION = "0"
ENFORCE_WHITELIST = "true"
WHITELIST = "Shmickey02"
OPS = "Shmickey02"
}
}
}

View file

@ -48,12 +48,10 @@ config:
lite:
enabled: true
routes:
- host: fugitives.rb.dcu.ie
backend: fugitives-mc.service.consul:25566
- host: mc.rb.dcu.ie
backend: vanilla-mc.service.consul:25567
- host: shemek.rb.dcu.ie
backend: shemek-mc.service.consul:25568
- host: olim909.rb.dcu.ie
backend: olim909-mc.service.consul:25568
- host: games.rb.dcu.ie
backend: games-mc.service.consul:25569
EOH

View file

@ -27,7 +27,7 @@ job "traefik" {
config {
image = "traefik"
network_mode = "host"
volumes = [
"local/traefik.toml:/etc/traefik/traefik.toml",
]

53
jobs/nginx/atlas.hcl Normal file
View file

@ -0,0 +1,53 @@
# Nomad job for the main Redbrick website (atlas), served by an nginx-based
# container behind Traefik on redbrick.dcu.ie / rb.dcu.ie.
job "atlas" {
datacenters = ["aperture"]
type = "service"
meta {
git-sha = ""
}
group "nginx-atlas" {
count = 1
network {
port "http" {
to = 80
}
}
# NOTE(review): no explicit service name — Consul will register this under a
# default derived name; confirm that is intended.
service {
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.nginx-atlas.rule=Host(`redbrick.dcu.ie`) || Host(`rb.dcu.ie`)",
"traefik.http.routers.nginx-atlas.entrypoints=web,websecure",
"traefik.http.routers.nginx-atlas.tls.certresolver=lets-encrypt",
"traefik.http.routers.nginx-atlas.middlewares=redirect-user-web",
# Redirect legacy user webspace URLs (redbrick.dcu.ie/~user/...) to the
# per-user subdomain form (user.redbrick.dcu.ie/...).
"traefik.http.middlewares.redirect-user-web.redirectregex.regex=https://redbrick\\.dcu\\.ie/~([^/]*)/?([^/].*)?",
"traefik.http.middlewares.redirect-user-web.redirectregex.replacement=https://$1.redbrick.dcu.ie/$2",
]
}
task "web" {
driver = "docker"
config {
image = "ghcr.io/redbrick/atlas:latest"
ports = ["http"]
}
resources {
cpu = 100
memory = 50
}
}
}
}

82
jobs/services/api.hcl Normal file
View file

@ -0,0 +1,82 @@
# Nomad job for the Redbrick API (api.redbrick.dcu.ie). Pulls a private GHCR
# image and mounts legacy storage plus the LDAP secret into the container.
job "api" {
datacenters = ["aperture"]
type = "service"
group "api" {
count = 1
network {
port "http" {
to = 80
}
}
service {
name = "api"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.api.rule=Host(`api.redbrick.dcu.ie`)",
"traefik.http.routers.api.entrypoints=web,websecure",
"traefik.http.routers.api.tls.certresolver=lets-encrypt",
]
}
task "api" {
driver = "docker"
config {
image = "ghcr.io/redbrick/api:latest"
ports = ["http"]
# Expose old NFS storage and home dirs to the API container; the LDAP
# secret is rendered by the template below.
volumes = [
"/oldstorage:/storage",
"/oldstorage/home:/home",
"local/ldap.secret:/etc/ldap.secret",
]
# Registry credentials come from the env rendered out of Consul below.
auth {
username = "${DOCKER_USER}"
password = "${DOCKER_PASS}"
}
}
# All runtime configuration is pulled from Consul KV at render time.
template {
destination = "local/.env"
env = true
change_mode = "restart"
data = <<EOH
DOCKER_USER={{ key "api/ghcr/username" }}
DOCKER_PASS={{ key "api/ghcr/password" }}
AUTH_USERNAME={{ key "api/auth/username" }}
AUTH_PASSWORD={{ key "api/auth/password" }}
LDAP_URI={{ key "api/ldap/uri" }}
LDAP_ROOTBINDDN={{ key "api/ldap/rootbinddn" }}
LDAP_SEARCHBASE={{ key "api/ldap/searchbase" }}
EMAIL_DOMAIN=redbrick.dcu.ie
EMAIL_SERVER={{ key "api/smtp/server" }}
EMAIL_PORT=587
EMAIL_USERNAME={{ key "api/smtp/username" }}
EMAIL_PASSWORD={{ key "api/smtp/password" }}
EMAIL_SENDER={{ key "api/smtp/sender" }}
EOH
}
template {
destination = "local/ldap.secret"
data = "{{ key \"api/ldap/secret\" }}" # this is necessary as the secret has no EOF
}
resources {
cpu = 300
memory = 1024
}
}
}
}

View file

@ -0,0 +1,54 @@
# Nomad job for brickbot2, the Redbrick Discord bot. No exposed ports; the bot
# makes outbound connections to Discord/LDAP/SMTP using config from Consul KV.
job "brickbot2" {
datacenters = ["aperture"]
type = "service"
group "brickbot2" {
count = 1
task "brickbot2" {
driver = "docker"
config {
image = "ghcr.io/redbrick/brickbot2:latest"
auth {
username = "${DOCKER_USER}"
password = "${DOCKER_PASS}"
}
# LDAP secret is rendered by the template below and mounted read-only.
volumes = [
"local/ldap.secret:/etc/ldap.secret:ro",
]
}
template {
destination = "local/ldap.secret"
data = "{{ key \"api/ldap/secret\" }}" # this is necessary as the secret has no EOF
}
template {
destination = "local/.env"
env = true
change_mode = "restart"
data = <<EOH
DOCKER_USER={{ key "brickbot/ghcr/username" }}
DOCKER_PASS={{ key "brickbot/ghcr/password" }}
BOT_DB={{ key "brickbot/db" }}
BOT_TOKEN={{ key "brickbot/discord/token" }}
BOT_PRIVILEGED={{ key "brickbot/discord/privileged" }}
BOT_PREFIX=.
BOT_GUILD={{ key "brickbot/discord/guild" }}
LDAP_HOST={{ key "brickbot/ldap/host" }}
SMTP_DOMAIN={{ key "brickbot/smtp/domain" }}
SMTP_HOST={{ key "brickbot/smtp/host" }}
SMTP_PORT=587
SMTP_USERNAME={{ key "brickbot/smtp/username" }}
SMTP_PASSWORD={{ key "brickbot/smtp/password" }}
SMTP_SENDER={{ key "brickbot/smtp/sender" }}
API_USERNAME={{ key "brickbot/api/username" }}
API_PASSWORD={{ key "brickbot/api/password" }}
VERIFIED_ROLE={{ key "brickbot/discord/verified_role" }}
EOH
}
}
}
}

View file

@ -0,0 +1,50 @@
# Periodic batch job that dumps the hedgedoc Postgres database via
# `nomad alloc exec` into /storage/backups, prunes old dumps, and posts a
# Discord webhook alert if the dump comes out empty.
job "hedgedoc-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# NOTE(review): this is a 6-field cron (seconds first), so "0 */3 * * * *"
# fires every 3 minutes at :00 seconds — if every 3 hours was intended,
# it should be "0 0 */3 * * *". Confirm intent.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "postgres-backup" {
# raw_exec: runs directly on the client host so it can reach /storage
# and the local `nomad` CLI.
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/postgres/hedgedoc
alloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec -task hedgedoc-db $alloc_id pg_dumpall -U {{ key "hedgedoc/db/user" }} > "${file}"
find /storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "postgres/webhook/discord" }}
fi
EOH
destination = "local/script.sh"
}
}
}
}

115
jobs/services/hedgedoc.hcl Normal file
View file

@ -0,0 +1,115 @@
# Nomad job for HedgeDoc (md.redbrick.dcu.ie): an app task plus a dedicated
# Postgres task in the same group, addressed via NOMAD_ADDR_db.
job "hedgedoc" {
datacenters = ["aperture"]
type = "service"
group "web" {
network {
# mode = "bridge"
port "http" {
to = 3000
}
port "db" {
to = 5432
}
}
service {
name = "hedgedoc"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.frontend.headers.STSSeconds=63072000",
"traefik.frontend.headers.browserXSSFilter=true",
"traefik.frontend.headers.contentTypeNosniff=true",
# Advertise the onion service for Tor clients.
"traefik.frontend.headers.customResponseHeaders=alt-svc:h2=l3sb47bzhpbelafss42pspxzqo3tipuk6bg7nnbacxdfbz7ao6semtyd.onion:443; ma=2592000",
"traefik.enable=true",
"traefik.port=${NOMAD_PORT_http}",
"traefik.http.routers.md.rule=Host(`md.redbrick.dcu.ie`,`md.rb.dcu.ie`)",
"traefik.http.routers.md.tls=true",
"traefik.http.routers.md.tls.certresolver=lets-encrypt",
]
}
task "app" {
driver = "docker"
# Pinned to chell — presumably because the db task's data lives on that
# host's /opt; confirm before relaxing.
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
config {
image = "quay.io/hedgedoc/hedgedoc:1.6.0"
ports = ["http"]
}
template {
data = <<EOH
CMD_IMAGE_UPLOAD_TYPE = "imgur"
CMD_IMGUR_CLIENTID = "{{ key "hedgedoc/imgur/clientid" }}"
CMD_IMGUR_CLIENTSECRET = "{{ key "hedgedoc/imgur/clientsecret" }}"
CMD_DB_URL = "postgres://{{ key "hedgedoc/db/user" }}:{{ key "hedgedoc/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "hedgedoc/db/name" }}"
CMD_ALLOW_FREEURL = "false"
CMD_DEFAULT_PERMISSION = "private"
CMD_DOMAIN = "md.redbrick.dcu.ie"
CMD_ALLOW_ORIGIN = ["md.redbrick.dcu.ie", "md.rb.dcu.ie"]
CMD_HSTS_PRELOAD = "true"
CMD_USE_CDN = "true"
CMD_PROTOCOL_USESSL = "true"
CMD_URL_ADDPORT = "false"
CMD_ALLOW_EMAIL_REGISTER = "false"
CMD_ALLOW_ANONYMOUS = "false"
CMD_EMAIL = "false"
CMD_LDAP_URL = "{{ key "hedgedoc/ldap/url" }}"
CMD_LDAP_SEARCHBASE = "ou=accounts,o=redbrick"
CMD_LDAP_SEARCHFILTER = "{{`(uid={{username}})`}}"
CMD_LDAP_PROVIDERNAME = "Redbrick"
CMD_LDAP_USERIDFIELD = "uidNumber"
CMD_LDAP_USERNAMEFIELD = "uid"
CMD_ALLOW_GRAVATAR = "true"
CMD_SESSION_SECRET = "{{ key "hedgedoc/session/secret" }}"
CMD_LOG_LEVEL = "debug"
EOH
destination = "local/.env"
env = true
}
}
task "hedgedoc-db" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
config {
image = "postgres:9.6-alpine"
ports = ["db"]
# Data persists on the chell host, hence the constraint above.
volumes = [
"/opt/postgres/hedgedoc:/var/lib/postgresql/data"
]
}
template {
data = <<EOH
POSTGRES_PASSWORD={{ key "hedgedoc/db/password" }}
POSTGRES_USER={{ key "hedgedoc/db/user" }}
POSTGRES_NAME={{ key "hedgedoc/db/name" }}
EOH
destination = "local/db.env"
env = true
}
}
}
}

125
jobs/services/plausible.hcl Normal file
View file

@ -0,0 +1,125 @@
# Nomad job for Plausible Analytics: the web app (backed by the shared
# postgres.service.consul instance) plus a co-located ClickHouse task for
# event storage.
job "plausible" {
datacenters = ["aperture"]
type = "service"
group "web" {
network {
port "http" {
to = 8000
}
port "db" {
static = 8123
}
}
task "plausible" {
service {
name = "plausible"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.plausible.rule=Host(`plausible.redbrick.dcu.ie`)",
"traefik.http.routers.plausible.entrypoints=websecure",
"traefik.http.routers.plausible.tls.certresolver=lets-encrypt"
]
}
driver = "docker"
config {
image = "plausible/analytics:latest"
ports = ["http"]
# Crude startup ordering: wait for ClickHouse, run migrations, then start.
command = "/bin/sh"
args = ["-c", "sleep 10 && /entrypoint.sh db migrate && /entrypoint.sh run"]
}
template {
data = <<EOH
BASE_URL=https://plausible.redbrick.dcu.ie
SECRET_KEY_BASE={{ key "plausible/secret" }}
DATABASE_URL=postgres://{{ key "plausible/db/user" }}:{{ key "plausible/db/password" }}@postgres.service.consul:5432/{{ key "plausible/db/name" }}
CLICKHOUSE_DATABASE_URL=http://{{ env "NOMAD_ADDR_db" }}/plausible_events_db
EOH
destination = "local/file.env"
env = true
}
resources {
memory = 500
}
}
task "clickhouse" {
# Pinned to chell — ClickHouse data lives on that host's /opt.
constraint {
attribute = "${attr.unique.hostname}"
value = "chell"
}
service {
name = "plausible-clickhouse"
port = "db"
}
driver = "docker"
config {
image = "clickhouse/clickhouse-server:23.3.7.5-alpine"
ports = ["db"]
volumes = [
"/opt/plausible/clickhouse:/var/lib/clickhouse",
"local/clickhouse.xml:/etc/clickhouse-server/config.d/logging.xml:ro",
"local/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro"
]
}
template {
data = <<EOH
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<!-- Stop all the unnecessary logging -->
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
<text_log remove="remove"/>
<trace_log remove="remove"/>
<metric_log remove="remove"/>
<asynchronous_metric_log remove="remove"/>
<session_log remove="remove"/>
<part_log remove="remove"/>
</clickhouse>
EOH
destination = "local/clickhouse.xml"
}
template {
data = <<EOH
<clickhouse>
<profiles>
<default>
<log_queries>0</log_queries>
<log_query_threads>0</log_query_threads>
</default>
</profiles>
</clickhouse>
EOH
destination = "local/clickhouse-user-config.xml"
}
resources {
memory = 800
}
}
}
}

View file

@ -0,0 +1,50 @@
# Periodic batch job that dumps the shared Postgres instance (all databases,
# via pg_dumpall through `nomad alloc exec`) into /storage/backups, prunes
# dumps older than 3 days, and posts a Discord webhook alert on failure.
job "postgres-backup" {
datacenters = ["aperture"]
type = "batch"
periodic {
# NOTE(review): this is a 6-field cron (seconds first), so "0 */3 * * * *"
# fires every 3 minutes at :00 seconds — if every 3 hours was intended,
# it should be "0 0 */3 * * *". Confirm intent.
crons = ["0 */3 * * * *"]
prohibit_overlap = true
}
group "db-backup" {
task "postgres-backup" {
# raw_exec: runs directly on the client host so it can reach /storage
# and the local `nomad` CLI.
driver = "raw_exec"
config {
command = "/bin/bash"
args = ["local/script.sh"]
}
template {
data = <<EOH
#!/bin/bash
file=/storage/backups/nomad/postgres/postgres-$(date +%Y-%m-%d_%H-%M-%S).sql
mkdir -p /storage/backups/nomad/postgres
alloc_id=$(nomad job status postgres | grep running | tail -n 1 | cut -d " " -f 1)
job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
nomad alloc exec $alloc_id pg_dumpall -U {{ key "postgres/username/root" }} > "${file}"
# Prune this job's own dumps (postgres-*.sql in the top-level dir). The
# previous path pointed at the hedgedoc/ subdirectory, so these dumps were
# never cleaned up and hedgedoc's backups could be deleted instead.
find /storage/backups/nomad/postgres/postgres-* -ctime +3 -exec rm {} \; || true
if [ -s "$file" ]; then # check if file exists and is not empty
echo "Backup successful"
exit 0
else
rm $file
curl -H "Content-Type: application/json" -d \
'{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
{{ key "postgres/webhook/discord" }}
fi
EOH
destination = "local/script.sh"
}
}
}
}

View file

@ -0,0 +1,96 @@
# Nomad job for the shared Postgres instance used by plausible, privatebin,
# shlink, vaultwarden, etc. Pinned to wheatley where the data volume lives,
# and exposed on a static port so consumers can reach postgres.service.consul:5432.
job "postgres" {
datacenters = ["aperture"]
constraint {
attribute = "${attr.unique.hostname}"
value = "wheatley"
}
group "db" {
network {
port "db" {
static = 5432
}
}
task "postgres-db" {
driver = "docker"
template {
data = <<EOH
POSTGRES_PASSWORD="{{ key "postgres/password/root" }}"
POSTGRES_USER="{{ key "postgres/username/root" }}"
EOH
destination = "local/file.env"
env = true
}
config {
image = "postgres:latest"
ports = ["db"]
volumes = [
"/opt/postgres:/var/lib/postgresql/data",
"local/postgresql.conf:/etc/postgres/postgresql.conf",
"local/pg_hba.conf:/pg_hba.conf",
]
}
# Tuned server config; hba_file points at the template rendered below.
template {
data = <<EOH
max_connections = 100
shared_buffers = 2GB
effective_cache_size = 6GB
maintenance_work_mem = 512MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 5242kB
min_wal_size = 1GB
max_wal_size = 4GB
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2
hba_file = "/pg_hba.conf"
EOH
destination = "local/postgresql.conf"
}
# Loopback/local connections are trusted; everything else must use
# scram-sha-256 password auth.
template {
data = <<EOH
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
local replication all trust
host replication all 127.0.0.1/32 trust
host replication all ::1/128 trust
host all all all scram-sha-256
EOH
destination = "local/pg_hba.conf"
}
resources {
cpu = 400
memory = 800
}
service {
name = "postgres"
port = "db"
check {
type = "tcp"
interval = "2s"
timeout = "2s"
}
}
}
}
}

View file

@ -0,0 +1,197 @@
# Nomad job for PrivateBin (paste.rb.dcu.ie). Configuration is rendered into
# conf.php from the heredoc below; storage is the shared Postgres instance.
job "privatebin" {
datacenters = ["aperture"]
type = "service"
group "privatebin" {
count = 1
network {
port "http" {
to = 8080
}
}
service {
name = "privatebin"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.privatebin.rule=Host(`paste.rb.dcu.ie`) || Host(`paste.redbrick.dcu.ie`)",
"traefik.http.routers.privatebin.entrypoints=web,websecure",
"traefik.http.routers.privatebin.tls.certresolver=lets-encrypt",
]
}
task "privatebin" {
driver = "docker"
config {
image = "privatebin/nginx-fpm-alpine:stable"
ports = ["http"]
# conf.php is rendered by the template below into CONFIG_PATH.
volumes = [
"local/conf.php:/srv/data/conf.php",
]
}
template {
destination = "local/.env"
env = true
change_mode = "restart"
data = <<EOH
TZ=Europe/Dublin
PHP_TZ=Europe/Dublin
CONFIG_PATH=/srv/data/
EOH
}
# PrivateBin INI config; the ';' comments below are part of the rendered
# file, not HCL comments.
template {
destination = "local/conf.php"
data = <<EOH
[main]
name = "Redbrick PasteBin"
basepath = "https://paste.rb.dcu.ie/"
discussion = true
opendiscussion = false
password = true
fileupload = true
burnafterreadingselected = false
defaultformatter = "markdown"
; (optional) set a syntax highlighting theme, as found in css/prettify/
syntaxhighlightingtheme = "sons-of-obsidian"
; size limit per paste or comment in bytes, defaults to 10 Mebibytes
sizelimit = 10485760
; template to include, default is "bootstrap" (tpl/bootstrap.php)
template = "bootstrap-dark"
; (optional) info text to display
; use single, instead of double quotes for HTML attributes
;info = "More information on the <a href='https://privatebin.info/'>project page</a>."
; (optional) notice to display
; notice = "Note: Distro is a Goombean."
languageselection = false
languagedefault = "en"
; (optional) URL shortener address to offer after a new paste is created.
; It is suggested to only use this with self-hosted shorteners as this will leak
; the pastes encryption key.
urlshortener = "https://s.rb.dcu.ie/rest/v1/short-urls/shorten?apiKey={{ key "privatebin/shlink/api" }}&format=txt&longUrl="
qrcode = true
email = true
; Can be set to one these values:
; "none" / "identicon" (default) / "jdenticon" / "vizhash".
icon = "identicon"
; Content Security Policy headers allow a website to restrict what sources are
; allowed to be accessed in its context. You need to change this if you added
; custom scripts from third-party domains to your templates, e.g. tracking
; scripts or run your site behind certain DDoS-protection services.
; Check the documentation at https://content-security-policy.com/
; Notes:
; - If you use a bootstrap theme, you can remove the allow-popups from the
; sandbox restrictions.
; - By default this disallows to load images from third-party servers, e.g. when
; they are embedded in pastes. If you wish to allow that, you can adjust the
; policy here. See https://github.com/PrivateBin/PrivateBin/wiki/FAQ#why-does-not-it-load-embedded-images
; for details.
; - The 'unsafe-eval' is used in two cases; to check if the browser supports
; async functions and display an error if not and for Chrome to enable
; webassembly support (used for zlib compression). You can remove it if Chrome
; doesn't need to be supported and old browsers don't need to be warned.
; cspheader = "default-src 'none'; base-uri 'self'; form-action 'none'; manifest-src 'self'; connect-src * blob:; script-src 'self' 'unsafe-eval'; style-src 'self'; font-src 'self'; frame-ancestors 'none'; img-src 'self' data: blob:; media-src blob:; object-src blob:; sandbox allow-same-origin allow-scripts allow-forms allow-popups allow-modals allow-downloads"
zerobincompatibility = false
httpwarning = true
compression = "zlib"
[expire]
; make sure the value exists in [expire_options]
default = "1week"
[expire_options]
5min = 300
10min = 600
1hour = 3600
1day = 86400
1week = 604800
2week = 1209600
; Well this is not *exactly* one month, it's 30 days:
1month = 2592000
1year = 31536000
never = 0
[formatter_options]
plaintext = "Plain Text"
markdown = "Markdown"
syntaxhighlighting = "Source Code"
[traffic]
; time limit between calls from the same IP address in seconds
; Set this to 0 to disable rate limiting.
limit = 10
; (optional) Set IPs addresses (v4 or v6) or subnets (CIDR) which are exempted
; from the rate-limit. Invalid IPs will be ignored. If multiple values are to
; be exempted, the list needs to be comma separated. Leave unset to disable
; exemptions.
; exempted = "1.2.3.4,10.10.10/24"
; (optional) If you want only some source IP addresses (v4 or v6) or subnets
; (CIDR) to be allowed to create pastes, set these here. Invalid IPs will be
; ignored. If multiple values are to be exempted, the list needs to be comma
; separated. Leave unset to allow anyone to create pastes.
; creators = "1.2.3.4,10.10.10/24"
; (optional) if your website runs behind a reverse proxy or load balancer,
; set the HTTP header containing the visitors IP address, i.e. X_FORWARDED_FOR
; header = "X_FORWARDED_FOR"
[purge]
; minimum time limit between two purgings of expired pastes, it is only
; triggered when pastes are created
; Set this to 0 to run a purge every time a paste is created.
limit = 300
; maximum amount of expired pastes to delete in one purge
; Set this to 0 to disable purging. Set it higher, if you are running a large
; site
batchsize = 10
[model]
class = Database
[model_options]
dsn = "pgsql:host=postgres.service.consul;dbname={{ key "privatebin/db/name" }}"
tbl = "privatebin_" ; table prefix
usr = "{{ key "privatebin/db/user" }}"
pwd = "{{ key "privatebin/db/password" }}"
opt[12] = true ; PDO::ATTR_PERSISTENT ; use persistent connections - default
EOH
}
}
}
}

91
jobs/services/shlink.hcl Normal file
View file

@ -0,0 +1,91 @@
# Nomad job for Shlink (s.rb.dcu.ie), the URL shortener, backed by the shared
# Postgres instance. The shlink-web-client task is kept below, commented out.
job "shlink" {
datacenters = ["aperture"]
type = "service"
group "web" {
network {
port "api" {
to = 8080
}
# Reserved for the (currently disabled) web client task below.
port "web" {
to = 8080
}
}
service {
name = "shlink"
port = "api"
tags = [
"traefik.enable=true",
"traefik.http.routers.shlink-api.rule=Host(`s.rb.dcu.ie`)",
"traefik.http.routers.shlink-api.tls=true",
"traefik.http.routers.shlink-api.tls.certresolver=lets-encrypt",
]
}
task "shlink" {
driver = "docker"
config {
image = "shlinkio/shlink"
ports = ["api"]
}
template {
data = <<EOH
DEFAULT_DOMAIN=s.rb.dcu.ie
IS_HTTPS_ENABLED=true
DB_DRIVER=postgres
DB_USER={{ key "shlink/db/user" }}
DB_PASSWORD={{ key "shlink/db/password" }}
DB_NAME={{ key "shlink/db/name" }}
DB_HOST=postgres.service.consul
GEOLITE_LICENSE_KEY={{ key "shlink/geolite/key" }}
EOH
destination = "local/file.env"
env = true
}
resources {
memory = 1000
}
}
# task "shlink-web-client" {
# driver = "docker"
#
# config {
# image = "shlinkio/shlink-web-client"
# ports = ["web"]
# }
#
# template {
# data = <<EOH
#SHLINK_SERVER_URL=https://s.rb.dcu.ie
#SHLINK_API_KEY={{ key "shlink/api/key" }}
#EOH
# destination = "local/file.env"
# env = true
# }
#
#
#
# service {
# name = "shlink"
# port = "api"
#
# tags = [
# "traefik.enable=true",
# "traefik.http.routers.shlink-web.rule=Host(`shlink.rb.dcu.ie`)",
# "traefik.http.routers.shlink-web.tls=true",
# "traefik.http.routers.shlink-web.tls.certresolver=lets-encrypt",
# ]
# }
# resources {
# memory = 500
# }
# }
}
}

View file

@ -0,0 +1,68 @@
# Nomad job for Vaultwarden (vault.redbrick.dcu.ie), backed by the shared
# Postgres instance; attachment/data dir lives on NFS storage.
job "vaultwarden" {
datacenters = ["aperture"]
type = "service"
group "vaultwarden" {
count = 1
network {
port "http" {
to = 80
}
}
service {
name = "vaultwarden"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.vaultwarden.rule=Host(`vault.redbrick.dcu.ie`)",
"traefik.http.routers.vaultwarden.entrypoints=websecure",
"traefik.http.routers.vaultwarden.tls.certresolver=lets-encrypt",
]
}
task "vaultwarden" {
driver = "docker"
config {
image = "vaultwarden/server:latest-alpine"
ports = ["http"]
volumes = [
"/storage/nomad/vaultwarden:/data"
]
}
# Registration is closed; accounts are created by invitation only.
template {
data = <<EOF
DOMAIN=https://vault.redbrick.dcu.ie
DATABASE_URL=postgresql://{{ key "vaultwarden/db/user" }}:{{ key "vaultwarden/db/password" }}@postgres.service.consul:5432/{{ key "vaultwarden/db/name" }}
SIGNUPS_ALLOWED=false
INVITATIONS_ALLOWED=true
# This is not the actual token, but a hash of it. Vaultwarden does not like the actual token.
ADMIN_TOKEN={{ key "vaultwarden/admin/hash" }}
EOF
destination = "local/env"
env = true
}
# Optional settings kept for future use; note these HCL comments are NOT
# part of the rendered env file above.
# These yubico variables are not necessary for yubikey support, only to verify the keys with yubico.
#YUBICO_CLIENT_ID={{ key "vaultwarden/yubico/client_id" }}
#YUBICO_SECRET_KEY={{ key "vaultwarden/yubico/secret_key" }}
#SMTP_HOST={{ key "vaultwarden/smtp/host" }}
#SMTP_FROM={{ key "vaultwarden/smtp/from" }}
#SMTP_PORT={{ key "vaultwarden/smtp/port" }}
#SMTP_SECURITY=force_tls
#SMTP_USERNAME={{ key "vaultwarden/smtp/username" }}
#SMTP_PASSWORD={{ key "vaultwarden/smtp/password" }}
resources {
cpu = 500
memory = 500
}
}
}
}

51
jobs/services/wetty.hcl Normal file
View file

@ -0,0 +1,51 @@
# Nomad job for WeTTY (wetty.rb.dcu.ie): a browser-based SSH terminal that
# connects to the host configured in Consul KV.
job "wetty" {
datacenters = ["aperture"]
type = "service"
group "wetty" {
count = 1
network {
port "http" {
to = 3000
}
}
service {
name = "wetty"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
tags = [
"traefik.enable=true",
"traefik.http.routers.wetty.rule=Host(`wetty.rb.dcu.ie`)",
"traefik.http.routers.wetty.entrypoints=web,websecure",
"traefik.http.routers.wetty.tls.certresolver=lets-encrypt",
]
}
task "wetty" {
driver = "docker"
config {
image = "wettyoss/wetty"
ports = ["http"]
}
template {
destination = "local/.env"
env = true
data = <<EOH
SSHHOST={{ key "wetty/ssh/host" }}
SSHPORT=22
EOH
}
}
}
}

View file

@ -28,6 +28,7 @@ DISCORD_TOKEN={{ key "user-projects/ayden/gomlbot/discord/token" }}
DOCKER_USER={{ key "user-projects/ayden/ghcr/username" }}
DOCKER_PASS={{ key "user-projects/ayden/ghcr/password" }}
DEBUG=false
MONGO_DB={{ key "user-projects/ayden/gomlbot/mongo/db" }}
EOH
destination = "local/.env"
env = true

View file

@ -0,0 +1,26 @@
# Nomad job for midnight's calendarbot Discord bot (user project). No exposed
# ports; credentials come from Consul KV.
job "midnight-calendarbot" {
datacenters = ["aperture"]
type = "service"
group "calendarbot" {
count = 1
task "calendarbot" {
driver = "docker"
config {
image = "ghcr.io/nightmarishblue/calendarbot:master"
# Always re-pull so redeploys pick up a moved :master tag.
force_pull = true
}
template {
data = <<EOH
BOT_TOKEN={{ key "user-projects/midnight/calendarbot/discord/token" }}
APPLICATION_ID={{ key "user-projects/midnight/calendarbot/discord/appid" }}
EOH
destination = "local/.env"
env = true
}
}
}
}

View file

@ -64,13 +64,16 @@ The VMs are configured with cloud-init. Their [docs](https://cloudinit.readthedo
...
args = [
...
"virtio-net-pci,netdev=hn0,id=nic1,mac=52:54:84:ba:49:22", # make sure this MAC address is unique!!
"-smbios",
"type=1,serial=ds=nocloud-net;s=http://136.206.16.5:8000/",
]
...
```
Here in the args block:
- we define that the VM will have a network device using the `virtio` driver, we pass it an `id` and a random ***unique*** MAC address
@ -83,3 +86,4 @@ Here in the args block:
To create a new VM, you'll need to create a new job file and a cloud-init configuration file. Copy any of the existing job files and modify them to suit your needs. The cloud-init configuration files can be copied and changed based on the user also. **Remember to ensure the MAC addresses are unique!**

View file

@ -0,0 +1,95 @@
# Nomad job running per-admin exam QEMU VMs. Each group boots a Debian 12
# cloud image, bridged onto the host network, with cloud-init config served
# over HTTP from 136.206.16.5:8000 (see the smbios serial argument).
job "admin-exams" {
datacenters = ["aperture"]
group "ayden-vm" {
network {
mode = "host"
}
service {
name = "ayden-vm"
}
task "ayden-vm" {
constraint {
attribute = "${attr.unique.hostname}"
value = "chell" # task must be scheduled on a host with the bridge device configured
}
resources {
cpu = 12000
memory = 4096
}
# Download the base qcow2 image into the alloc dir as this VM's disk.
artifact {
source = "http://136.206.16.5:8000/base-images/debian-12-genericcloud-amd64-30G.qcow2"
destination = "local/ayden-vm.qcow2"
mode = "file"
}
driver = "qemu"
config {
image_path = "local/ayden-vm.qcow2"
accelerator = "kvm"
drive_interface = "virtio"
args = [
"-netdev",
"bridge,id=hn0",
"-device",
"virtio-net-pci,netdev=hn0,id=nic1,mac=52:54:84:ba:49:20", # mac address must be unique or else you will regret it
"-smbios",
# Points cloud-init (NoCloud datasource) at this VM's config directory.
"type=1,serial=ds=nocloud-net;s=http://136.206.16.5:8000/ayden-vm/",
]
}
}
}
group "hypnoant-vm" {
network {
mode = "host"
}
service {
name = "hypnoant-vm"
}
task "hypnoant-vm" {
constraint {
attribute = "${attr.unique.hostname}"
value = "wheatley"
}
resources {
cpu = 12000
memory = 4096
}
artifact {
source = "http://136.206.16.5:8000/base-images/debian-12-genericcloud-amd64-30G.qcow2"
destination = "local/hypnoant-vm.qcow2"
mode = "file"
}
driver = "qemu"
config {
image_path = "local/hypnoant-vm.qcow2"
accelerator = "kvm"
drive_interface = "virtio"
args = [
"-netdev",
"bridge,id=hn0",
"-device",
# MAC must stay unique across all VMs on the bridge.
"virtio-net-pci,netdev=hn0,id=nic1,mac=52:54:84:ba:49:22",
"-smbios",
"type=1,serial=ds=nocloud-net;s=http://136.206.16.5:8000/hypnoant-vm/",
]
}
}
}
}