~sumner/nixos-configuration

6a577fb8da7a0485fcf4d92ec13f9a172c1d7771 — Sumner Evans 2 years ago 390a71d master
sr.ht -> GitHub
129 files changed, 1 insertions(+), 6355 deletions(-)

D .build.yml
D .envrc
D .gitattributes
D .gitignore
D .gitmodules
D .vim/coc-settings.json
D LICENSE
M README.md
D archives/README
D archives/initialize-droplet.py
D archives/nixos-infect
D cachix.nix
D cachix/nix-community.nix
D cachix/nixpkgs-wayland.nix
D cachix/sumnerevans.nix
D configuration.nix
D host-configurations/bespin.nix
D host-configurations/coruscant.nix
D host-configurations/jedha.nix
D host-configurations/kessel.nix
D host-configurations/morak.nix
D host-configurations/mustafar/default.nix
D host-configurations/mustafar/intel-sof-firmware.nix
D host-configurations/mustafar/sof-topology-hatch-1.5.tar.xz
D host-configurations/tatooine.nix
D modules/beeper.nix
D modules/default.nix
D modules/hardware/bluetooth.nix
D modules/hardware/bootloader.nix
D modules/hardware/default.nix
D modules/hardware/firewall.nix
D modules/hardware/laptop.nix
D modules/hardware/networking.nix
D modules/hardware/tmpfs.nix
D modules/hardware/v4l2loopback.nix
D modules/nix.nix
D modules/programs/default.nix
D modules/programs/tmux.nix
D modules/services/acme.nix
D modules/services/airsonic.nix
D modules/services/bitwarden.nix
D modules/services/default.nix
D modules/services/docker.nix
D modules/services/goaccess.nix
D modules/services/gonic.nix
D modules/services/grafana.nix
D modules/services/gui/default.nix
D modules/services/gui/fonts.nix
D modules/services/gui/i3wm.nix
D modules/services/gui/sway.nix
D modules/services/healthcheck.nix
D modules/services/isso.nix
D modules/services/journald.nix
D modules/services/longview.nix
D modules/services/matrix/coturn.nix
D modules/services/matrix/default.nix
D modules/services/matrix/heisenbridge.nix
D modules/services/matrix/linkedin-matrix.nix
D modules/services/matrix/matrix-vacation-responder.nix
D modules/services/matrix/mjolnir.nix
D modules/services/matrix/quotesfilebot.nix
D modules/services/matrix/standupbot.nix
D modules/services/matrix/synapse/cleanup-synapse.nix
D modules/services/matrix/synapse/default.nix
D modules/services/matrix/synapse/shared-config.nix
D modules/services/mumble.nix
D modules/services/nginx.nix
D modules/services/postgresql.nix
D modules/services/pr-tracker.nix
D modules/services/restic.nix
D modules/services/sshd.nix
D modules/services/syncthing.nix
D modules/services/xandikos.nix
D modules/time.nix
D modules/users/default.nix
D modules/users/root.nix
D modules/users/server-pubkeys.nix
D modules/users/sumner-ssh-pubkeys.nix
D modules/users/sumner.nix
D nixos-install-scripts
D notes/transition-nevarro-kessel.md
D pkgs/heisenbridge.nix
D pkgs/linkedin-matrix.nix
D pkgs/linkedin-messaging.nix
D pkgs/matrix-vacation-responder/default.nix
D pkgs/matrix-vacation-responder/deps.nix
D pkgs/pr-tracker.nix
D pkgs/quotesfilebot/default.nix
D pkgs/quotesfilebot/deps.nix
D pkgs/standupbot/default.nix
D pkgs/standupbot/deps.nix
D secrets/beeper-localenv-root-ca.pem
D secrets/coruscant-ip
D secrets/coturn-static-auth-secret
D secrets/isso-admin-password
D secrets/isso-comments-smtp-password
D secrets/longview/bespin
D secrets/longview/nevarro
D secrets/matrix/appservices/heisenbridge.nix
D secrets/matrix/appservices/linkedin-matrix.nix
D secrets/matrix/bots/marshal
D secrets/matrix/bots/quotesfilebot
D secrets/matrix/bots/standupbot
D secrets/matrix/bots/vacation-responder-password
D secrets/matrix/cleanup-synapse/kessel
D secrets/matrix/cleanup-synapse/morak
D secrets/matrix/cleanup-synapse/nevarro
D secrets/matrix/nevarro-smtp-pass
D secrets/matrix/registration-shared-secret/kessel
D secrets/matrix/registration-shared-secret/morak
D secrets/matrix/registration-shared-secret/nevarro
D secrets/matrix/shared-secret-auth/nevarro.space
D secrets/nix-remote-build
D secrets/pr-tracker-github-token
D secrets/restic-environment-variables
D secrets/restic-password
D secrets/syncthing-admin-password
D secrets/wireguard-coruscant-presharedkey
D secrets/wireguard-ipad-presharedkey
D secrets/wireguard-jedha-ip
D secrets/wireguard-jedha-presharedkey
D secrets/wireguard-jedha-privatekey
D secrets/wireguard-mustafar-ip
D secrets/wireguard-mustafar-presharedkey
D secrets/wireguard-mustafar-privatekey
D secrets/wireguard-pixel-presharedkey
D secrets/wireguard-privatekey
D secrets/xandikos
D shell.nix
D .build.yml => .build.yml +0 -37
@@ 1,37 0,0 @@
image: nixos/unstable
secrets:
  # SSH Deploy Key
  - f219888a-80af-4275-a777-89e8c7d277f0
environment:
  REPO_NAME: nixos-configuration
triggers:
  - action: email
    condition: failure
    to: alerts@sumnerevans.com
tasks:
  # Skip everything if not on master.
  - skip-not-master: |
      cd $REPO_NAME
      git branch --contains | grep master || echo "Skipping deploy since not on master"
      git branch --contains | grep master || complete-build

  - setup: |
      echo "cd $REPO_NAME" >> ~/.buildenv
      time ssh-keyscan kessel.nevarro.space >> ~/.ssh/known_hosts
      time ssh-keyscan morak.sumnerevans.com >> ~/.ssh/known_hosts
      time ssh-keyscan bespin.sumnerevans.com >> ~/.ssh/known_hosts

  - switch-commit: |
      ssh root@kessel.nevarro.space  "cd /etc/nixos && git fetch && git reset --hard $(git rev-parse HEAD)"
      ssh root@morak.sumnerevans.com "cd /etc/nixos && git fetch && git reset --hard $(git rev-parse HEAD)"
      ssh root@bespin.sumnerevans.com "cd /etc/nixos && git fetch && git reset --hard $(git rev-parse HEAD)"

  - remote-build: |
      ssh root@kessel.nevarro.space  "time nixos-rebuild build --show-trace"
      ssh root@morak.sumnerevans.com "time nixos-rebuild build --show-trace"
      ssh root@bespin.sumnerevans.com "time nixos-rebuild build --show-trace"

  - switch-generation: |
      ssh root@kessel.nevarro.space  "time nixos-rebuild switch --show-trace"
      ssh root@morak.sumnerevans.com "time nixos-rebuild switch --show-trace"
      ssh root@bespin.sumnerevans.com "time nixos-rebuild switch --show-trace"

D .envrc => .envrc +0 -13
@@ 1,13 0,0 @@
use nix

# Export all of the environment variables to make restic work.
restic_hostname=$(cat .restic_hostname || hostname)
export RESTIC_REPOSITORY=b2:test-scarif-backup:${restic_hostname}
set -a
. ./secrets/restic-environment-variables
set +a
export RESTIC_PASSWORD_FILE=secrets/restic-password

watch_file secrets/restic-environment-variables
watch_file .restic_hostname
unset PS1

D .gitattributes => .gitattributes +0 -2
@@ 1,2 0,0 @@
# git crypt
secrets/**      filter=git-crypt diff=git-crypt

D .gitignore => .gitignore +0 -6
@@ 1,6 0,0 @@
.direnv
hardware-configuration.nix
result

.venv
.restic_hostname

D .gitmodules => .gitmodules +0 -3
@@ 1,3 0,0 @@
[submodule "nixos-install-scripts"]
	path = nixos-install-scripts
	url = https://github.com/sumnerevans/nixos-install-scripts.git

D .vim/coc-settings.json => .vim/coc-settings.json +0 -5
@@ 1,5 0,0 @@
{
  "python.linting.pylintEnabled": false,
  "python.linting.mypyEnabled": true,
  "python.linting.enabled": true
}
\ No newline at end of file

D LICENSE => LICENSE +0 -21
@@ 1,21 0,0 @@
MIT License

Copyright (c) 2020 Sumner Evans

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

M README.md => README.md +1 -86
@@ 1,88 1,3 @@
# NixOS Configuration

[![builds.sr.ht status](https://builds.sr.ht/~sumner/nixos-configuration/commits/.build.yml.svg)](https://builds.sr.ht/~sumner/nixos-configuration/commits/.build.yml)
[![HealthCheck Status](https://healthchecks.io/badge/b8bf9b9d-b4bb-4c92-b546-1c69a0/BpOIMYGi.svg)](https://healthchecks.io/projects/8384107b-0803-48b3-bd99-7702d1214ca5/checks/)

This repository contains the NixOS configuration for my personal computers and
servers.

You can find my Home Manager Config here:
https://github.com/sumnerevans/home-manager-config/

## Hosts

- **Personal Computers**

  - **coruscant**: custom desktop
  - **jedha**: ThinkPad T580
  - **mustafar**: Samsung Galaxy Chromebook

- **Servers**

  - **bespin**: Linode VPS (non-realtime critical infrastructure)

    - [Syncthing](https://syncthing.net)

  - **morak**: Hetzner Cloud VPS (CPX11)

    - Personal Websites
    - [Airsonic](https://airsonic.github.io)
    - [GoAccess](https://goaccess.io/)
    - [Isso](https://posativ.org/isso/)
    - [Matrix Vacation Responder](https://gitlab.com/beeper/matrix-vacation-responder)
    - [Murmur for Mumble](https://www.mumble.info/)
    - [pr-tracker](https://git.sr.ht/~sumner/pr-tracker)
    - [Synapse](https://github.com/matrix-org/synapse) (sumnerevans.com)
    - [Syncthing](https://syncthing.net)
    - [vaultwarden](https://github.com/dani-garcia/vaultwarden)
    - [Xandikos](https://www.xandikos.org/)

  - **kessel**: Hetzner Cloud VPS (CCX12, primary Synapse infrastructure)

    - nevarro.space website
    - [quotesfilebot](https://gitlab.com/jrrobel/quotes-file-bot)
    - [standupbot](https://sr.ht/~sumner/standupbot)
    - [Synapse](https://github.com/matrix-org/synapse) (nevarro.space)
      - [Heisenbridge](https://github.com/hifi/heisenbridge)
      - [LinkedIn Matrix](https://gitlab.com/beeper/linkedin)

## Installation Instructions

```
.----------------------------------------------------------------------------.
| WARNING:                                                                   |
|                                                                            |
| Don't install somebody else's NixOS configs. Use them as inspiration, but  |
| don't actually just blindly copy.                                          |
'----------------------------------------------------------------------------'
```

To install this configuration,

1. Clone this repository to `/etc/nixos` on a NixOS system.
2. Unlock the repo using `git-crypt unlock /path/to/git-crypt/key`.
3. Create a new host configuration in the `host-configurations` folder.
4. Source the host configuration from `hardware-configuration.nix`.
5. Run `sudo nixos-rebuild switch --upgrade`.

## Goals

- Infrastructure as code
- Immutable infrastructure (as much as possible)
- Everything backed up to B2
- Everything backed up to onsite location

### Uptime

- Can blow away all servers (but not data) and restore in under an hour
- Can restore all data within one day after catastrophic failure (everything
  goes down, including data)

  - From local backup: 1 day
  - From B2: 2 days

## Backup Strategy

I am using [Restic](https://github.com/restic/restic) to backup everything on my
server, and all of my important documents are stored in Syncthing, which is
backed up from my server.
Migrated to GitHub: https://github.com/sumnerevans/nixos-configuration

D archives/README => archives/README +0 -3
@@ 1,3 0,0 @@
This directory is here just in case any of my non-Python dependencies gets
taken down. I'm just going to assume that nobody's going to get rid of the
``python-digitalocean`` module.

D archives/initialize-droplet.py => archives/initialize-droplet.py +0 -221
@@ 1,221 0,0 @@
#! /bin/env python3
"""
Environment variables:

    DIGITALOCEAN_ACCESS_TOKEN
        The token to use to authenticate to the DigitalOcean API.
"""

import os
import sys
import subprocess
from pathlib import Path

import digitalocean


def prompt_select(
    prompt,
    options,
    formatter,
    multiple=False,
    default=None,
    allow_none=False,
):
    options = list(options)
    while True:
        print()
        print(prompt)
        default_idx = None
        for i, opt in enumerate(options):
            print(f'  {i}: {formatter(opt)}')
            if default and opt.slug == default:
                default_idx = i
        print()

        try:
            how_many = {
                (True, True): 'zero or more',
                (True, False): 'one or more',
                (False, True): 'zero or one',
                (False, False): 'one',
            }[(multiple, allow_none)]

            result = set(
                map(
                    int,
                    input('Enter {} of the indexes{}: '.format(
                        how_many,
                        f' [{default_idx}]' if default else '',
                    )).split(),
                ))

            if len(result) == 0:
                if default:
                    if multiple:
                        return [options[default_idx]]
                    else:
                        return options[default_idx]
                elif allow_none:
                    return None
                continue
            elif len(result) == 1:
                if multiple:
                    return [options[list(result)[0]]]
                else:
                    return options[list(result)[0]]
            elif multiple is False:
                continue
            else:
                return [options[i] for i in result]
        except Exception:
            # Could be something that wasn't a number, invalid index, etc.
            # Regardless, reprompt.
            pass


def prompt_proceed(prompt):
    while True:
        proceed = input(prompt + ' [yN]: ')
        if proceed in ('y', 'Y'):
            return True
        elif proceed in ('n', 'N', ''):
            return False


token = (os.environ.get('DIGITALOCEAN_ACCESS_TOKEN')
         or input('DigitalOcean Access Token: '))

print(f'Using Access Token: {token}')
manager = digitalocean.Manager(token=token)

ips = manager.get_all_floating_ips()
floating_ip_str = os.environ.get('DROPLET_FLOATING_IP')
floating_ip_to_use = None if not floating_ip_str else [
    i for i in ips if i.ip == floating_ip_str
][0]
if not floating_ip_to_use:
    floating_ip_to_use = prompt_select(
        'Which floating IP do you want to assign to the droplet?',
        ips,
        lambda i: str(i),
        allow_none=True,
    )

# Prompt for the keys to auto-add to the droplet.
keys_to_use = prompt_select(
    'Which SSH keys do you want to be able to access the machine?',
    manager.get_all_sshkeys(),
    lambda s: s.name,
    multiple=True,
)

name = os.environ.get('DROPLET_NAME') or input('Droplet name: ')

region = os.environ.get('DROPLET_REGION')
if not region:
    region = prompt_select(
        'Which droplet region do you want to use?',
        manager.get_all_regions(),
        lambda r: f'{r.name}: {r.slug}',
        default='sfo2',
    ).slug

size = os.environ.get('DROPLET_SIZE')
if not size:
    size = prompt_select(
        'Which droplet size do you want to use?',
        filter(lambda m: m.memory <= 8192, manager.get_all_sizes()),
        lambda s: f'{s.memory}MiB/{s.disk}GiB@${s.price_monthly}/mo: {s.slug}',
        default='s-1vcpu-1gb',
    ).slug

image = os.environ.get('DROPLET_IMAGE')
if not image:
    image = prompt_select(
        'Which droplet size do you want to use?',
        filter(
            lambda i: (i.slug is not None and region in i.regions and
                       ('ubuntu' in i.slug or 'fedora' in i.slug)),
            manager.get_all_images(),
        ),
        lambda i: f'{i.name}: {i.slug}',
        default='ubuntu-16-04-x64',
    ).slug

# Extract all of the secrets and create runcmds for the initial sync.
cwd = Path(__file__).parent.resolve()
subprocess.run(
    [str(cwd.joinpath('secrets_file_manager.sh')), 'extract'],
    capture_output=True,
)
secrets = cwd.joinpath('secrets')
secrets_runcmds = []
for path in secrets.iterdir():
    with open(path, 'r') as f:
        lines = [
            l.strip().replace('"', r'\"').replace('`', r'\`')
            for l in f.readlines()
        ]
        for line in lines:
            secrets_runcmds.append(
                f'  - echo "{line}" >> /etc/nixos/secrets/{path.name}')

secrets_runcmds = '\n'.join(secrets_runcmds)

user_data = f'''#cloud-config

runcmd:
  - apt install -y git
  - git clone https://git.sr.ht/~sumner/infrastructure /etc/nixos
  - mkdir -p /etc/nixos/secrets
{secrets_runcmds}
  - curl https://raw.githubusercontent.com/elitak/nixos-infect/master/nixos-infect | PROVIDER=digitalocean NIXOS_IMPORT=./host.nix NIX_CHANNEL=nixos-unstable bash 2>&1 | tee /tmp/infect.log
'''

floating_ip_text = '\n' if not floating_ip_to_use else f'''
The following floating IP will be assigned to the machine:
    {floating_ip_to_use.ip}
'''

print()
print('=' * 80)
print('SUMMARY:')
print('=' * 80)
print(f'''
A droplet named "{name}" with initial image of "{image}" and size
"{size}" will be created in the {region} region.

The following SSH keys will be able to access the machine:
    { ', '.join(map(lambda k: k.name, keys_to_use))}
{floating_ip_text}
It will be configured with the following cloud configuration:

{user_data}''')

if not prompt_proceed(
        'Would you like to create a droplet with this configuration?'):
    print('Cancelling!')
    sys.exit(1)

droplet = digitalocean.Droplet(
    backups=False,
    image=image,
    ipv6=True,
    name=name,
    private_networking=True,
    region=region,
    size_slug=size,
    ssh_keys=keys_to_use,
    tags=[],
    token=token,
    user_data=user_data,
)

print('Creating...', end=' ')
droplet.create()

if floating_ip_to_use:
    floating_ip_to_use.assign(droplet.id)

print('DONE')

D archives/nixos-infect => archives/nixos-infect +0 -276
@@ 1,276 0,0 @@
#! /usr/bin/env bash

# More info at: https://github.com/elitak/nixos-infect

set -e -o pipefail

makeConf() {
  # Skip everything if main config already present
  [[ -e /etc/nixos/configuration.nix ]] && return 0
  # NB <<"EOF" quotes / $ ` in heredocs, <<EOF does not
  mkdir -p /etc/nixos
  # Prevent grep for sending error code 1 (and halting execution) when no lines are selected : https://www.unix.com/man-page/posix/1P/grep
  local IFS=$'\n' 
  for trypath in /root/.ssh/authorized_keys $HOME/.ssh/authorized_keys; do 
      [[ -r "$trypath" ]] \
      && keys=$(sed -E 's/^.*((ssh|ecdsa)-[^[:space:]]+)[[:space:]]+([^[:space:]]+)([[:space:]]*.*)$/\1 \3\4/' "$trypath") \
      && break
  done
  local network_import=""

  [ "$PROVIDER" = "digitalocean" ] && network_import="./networking.nix # generated at runtime by nixos-infect"
  cat > /etc/nixos/configuration.nix << EOF
{ ... }: {
  imports = [
    ./hardware-configuration.nix
    $network_import
    $NIXOS_IMPORT
  ];

  boot.cleanTmpDir = true;
  networking.hostName = "$(hostname)";
  networking.firewall.allowPing = true;
  services.openssh.enable = true;
  users.users.root.openssh.authorizedKeys.keys = [$(while read -r line; do echo -n "
    \"$line\" "; done <<< "$keys")
  ];
}
EOF
  # If you rerun this later, be sure to prune the filesSystems attr
  cat > /etc/nixos/hardware-configuration.nix << EOF
{ ... }:
{
  imports = [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix> ];
  boot.loader.grub.device = "$grubdev";
  fileSystems."/" = { device = "$rootfsdev"; fsType = "ext4"; };
}
EOF

  if [ "$PROVIDER" = "digitalocean" ]
  then
    makeNetworkingConf
  else
    true
  fi
}

makeNetworkingConf() {
  # XXX It'd be better if we used procfs for all this...
  local IFS=$'\n'
  eth0_name=$(ip address show | grep '^2:' | awk -F': ' '{print $2}')
  eth0_ip4s=$(ip address show dev "$eth0_name" | grep 'inet ' | sed -r 's|.*inet ([0-9.]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|')
  eth0_ip6s=$(ip address show dev "$eth0_name" | grep 'inet6 ' | sed -r 's|.*inet6 ([0-9a-f:]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|' || '')
  gateway=$(ip route show dev "$eth0_name" | grep default | sed -r 's|default via ([0-9.]+).*|\1|')
  gateway6=$(ip -6 route show dev "$eth0_name" | grep default | sed -r 's|default via ([0-9a-f:]+).*|\1|' || true)
  ether0=$(ip address show dev "$eth0_name" | grep link/ether | sed -r 's|.*link/ether ([0-9a-f:]+) .*|\1|')

  eth1_name=$(ip address show | grep '^3:' | awk -F': ' '{print $2}')||true
  if [ -n "$eth1_name" ];then
    eth1_ip4s=$(ip address show dev "$eth1_name" | grep 'inet ' | sed -r 's|.*inet ([0-9.]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|')
    eth1_ip6s=$(ip address show dev "$eth1_name" | grep 'inet6 ' | sed -r 's|.*inet6 ([0-9a-f:]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|' || '')
    ether1=$(ip address show dev "$eth1_name" | grep link/ether | sed -r 's|.*link/ether ([0-9a-f:]+) .*|\1|')
    interfaces1=<< EOF
      $eth1_name = {
        ipv4.addresses = [$(for a in "${eth1_ip4s[@]}"; do echo -n "
          $a"; done)
        ];
        ipv6.addresses = [$(for a in "${eth1_ip6s[@]}"; do echo -n "
          $a"; done)
        ];
EOF
    extraRules1="ATTR{address}==\"${ether1}\", NAME=\"${eth1_name}\""
  else
    interfaces1=""
    extraRules1=""
  fi

  nameservers=($(grep ^nameserver /etc/resolv.conf | cut -f2 -d' '))
  if [ "$eth0_name" = eth* ]; then
    predictable_inames="usePredictableInterfaceNames = lib.mkForce false;"
  else
    predictable_inames="usePredictableInterfaceNames = lib.mkForce true;"
  fi
  cat > /etc/nixos/networking.nix << EOF
{ lib, ... }: {
  # This file was populated at runtime with the networking
  # details gathered from the active system.
  networking = {
    nameservers = [$(for a in "${nameservers[@]}"; do echo -n "
      \"$a\""; done)
    ];
    defaultGateway = "${gateway}";
    defaultGateway6 = "${gateway6}";
    dhcpcd.enable = false;
    $predictable_inames
    interfaces = {
      $eth0_name = {
        ipv4.addresses = [$(for a in "${eth0_ip4s[@]}"; do echo -n "
          $a"; done)
        ];
        ipv6.addresses = [$(for a in "${eth0_ip6s[@]}"; do echo -n "
          $a"; done)
        ];
        ipv4.routes = [ { address = "${gateway}"; prefixLength = 32; } ];
        ipv6.routes = [ { address = "${gateway6}"; prefixLength = 32; } ];
      };
      $interfaces1
    };
  };
  services.udev.extraRules = ''
    ATTR{address}=="${ether0}", NAME="${eth0_name}"
    $extraRules1
  '';
}
EOF
  #! /usr/bin/env bash
  # NB put your semi-sensitive (not posted to github) configuration in a separate
  # file and include it via this customConfig() function. e.g.:
  #  customConfig() {
  #    cat > /etc/nixos/custom.nix << EOF
  #    { config, lib, pkgs, ... }: {
  #    }
  #    EOF
  #  }
  #
  # then you can add the files in configuration.nix's imports above and run something like:
  #   cat customConfig nixos-infect | root@targethost bash
  if [[ "$(type -t customConfig)" == "function" ]]; then customConfig; fi
}

makeSwap() {
  # TODO check currently available swapspace first
  swapFile=$(mktemp /tmp/nixos-infect.XXXXX.swp)
  dd if=/dev/zero "of=$swapFile" bs=1M count=$((1*1024))
  chmod 0600 "$swapFile"
  mkswap "$swapFile"
  swapon -v "$swapFile"
}

removeSwap() {
    swapoff -a
    rm -vf /tmp/nixos-infect.*.swp
}

prepareEnv() {
  # $grubdev is used in makeConf()
  for grubdev in /dev/vda /dev/sda; do [[ -e $grubdev ]] && break; done

  # Retrieve root fs block device
  #                   (get root mount)  (get partition or logical volume)
  rootfsdev=$(mount | grep "on / type" | awk '{print $1;}')

  # DigitalOcean doesn't seem to set USER while running user data
  export USER="root"
  export HOME="/root"

  # Use adapted wget if curl is missing
  which curl || { \
    curl() {
      eval "wget $(
        (local isStdout=1
        for arg in "$@"; do
          case "$arg" in
            "-o")
              echo "-O";
              isStdout=0
              ;;
            "-O")
              isStdout=0
              ;;
            "-L")
              ;;
            *)
              echo "$arg"
              ;;
          esac
        done;
        [[ $isStdout -eq 1 ]] && echo "-O-"
        )| tr '\n' ' '
      )"
    }; export -f curl; }

  # Nix installer tries to use sudo regardless of whether we're already uid 0
  #which sudo || { sudo() { eval "$@"; }; export -f sudo; }
  # shellcheck disable=SC2174
  mkdir -p -m 0755 /nix
}

req() {
  type "$1" > /dev/null 2>&1 || which "$1" > /dev/null 2>&1
}

checkEnv() {
  # Perform some easy fixups before checking
  which dnf && dnf install -y perl-Digest-SHA # Fedora 24
  which bzcat || (which yum && yum install -y bzip2) \
              || (which apt-get && apt-get update && apt-get install -y bzip2) \
              || true

  [[ "$(whoami)" == "root" ]] || { echo "ERROR: Must run as root"; return 1; }

  req curl || req wget || { echo "ERROR: Missing both curl and wget"; return 1; }
  req bzcat            || { echo "ERROR: Missing bzcat";              return 1; }
  req groupadd         || { echo "ERROR: Missing groupadd";           return 1; }
  req useradd          || { echo "ERROR: Missing useradd";            return 1; }
  req ip               || { echo "ERROR: Missing ip";                 return 1; }
  req awk              || { echo "ERROR: Missing awk";                return 1; }
  req cut              || { echo "ERROR: Missing cut";                return 1; }
}

infect() {
  # Add nix build users
  # FIXME run only if necessary, rather than defaulting true
  groupadd nixbld -g 30000 || true
  for i in {1..10}; do useradd -c "Nix build user $i" -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" nixbld$i || true; done
  # TODO use addgroup and adduser as fallbacks
  #addgroup nixbld -g 30000 || true
  #for i in {1..10}; do adduser -DH -G nixbld nixbld$i || true; done

  curl https://nixos.org/nix/install | $SHELL

  # shellcheck disable=SC1090
  source ~/.nix-profile/etc/profile.d/nix.sh

  [[ -z "$NIX_CHANNEL" ]] && NIX_CHANNEL="nixos-19.09"
  nix-channel --remove nixpkgs
  nix-channel --add "https://nixos.org/channels/$NIX_CHANNEL" nixos
  nix-channel --update

  export NIXOS_CONFIG=/etc/nixos/configuration.nix

  nix-env --set \
    -I nixpkgs=$HOME/.nix-defexpr/channels/nixos \
    -f '<nixpkgs/nixos>' \
    -p /nix/var/nix/profiles/system \
    -A system

  # Remove nix installed with curl | bash
  rm -fv /nix/var/nix/profiles/default*
  /nix/var/nix/profiles/system/sw/bin/nix-collect-garbage

  # Reify resolv.conf
  [[ -L /etc/resolv.conf ]] && mv -v /etc/resolv.conf /etc/resolv.conf.lnk && cat /etc/resolv.conf.lnk > /etc/resolv.conf

  # Stage the Nix coup d'état
  touch /etc/NIXOS
  echo etc/nixos                   > /etc/NIXOS_LUSTRATE
  echo etc/resolv.conf            >> /etc/NIXOS_LUSTRATE
  echo root/.nix-defexpr/channels >> /etc/NIXOS_LUSTRATE

  rm -rf /boot.bak
  mv -v /boot /boot.bak
  /nix/var/nix/profiles/system/bin/switch-to-configuration boot
}

[ -z "$PROVIDER" ] && PROVIDER="digitalocean" # you may also prepend PROVIDER=vultr to your call instead

prepareEnv
makeSwap # smallest (512MB) droplet needs extra memory!
checkEnv
makeConf
infect
removeSwap

if [[ -z "$NO_REBOOT" ]]; then
  reboot
fi

D cachix.nix => cachix.nix +0 -13
@@ 1,13 0,0 @@

# WARN: this file will get overwritten by $ cachix use <name>
{ pkgs, lib, ... }:

let
  folder = ./cachix;
  toImport = name: value: folder + ("/" + name);
  filterCaches = key: value: value == "regular" && lib.hasSuffix ".nix" key;
  imports = lib.mapAttrsToList toImport (lib.filterAttrs filterCaches (builtins.readDir folder));
in {
  inherit imports;
  nix.binaryCaches = ["https://cache.nixos.org/"];
}

D cachix/nix-community.nix => cachix/nix-community.nix +0 -11
@@ 1,11 0,0 @@

{
  nix = {
    binaryCaches = [
      "https://nix-community.cachix.org"
    ];
    binaryCachePublicKeys = [
      "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
    ];
  };
}

D cachix/nixpkgs-wayland.nix => cachix/nixpkgs-wayland.nix +0 -11
@@ 1,11 0,0 @@

{
  nix = {
    binaryCaches = [
      "https://nixpkgs-wayland.cachix.org"
    ];
    binaryCachePublicKeys = [
      "nixpkgs-wayland.cachix.org-1:3lwxaILxMRkVhehr5StQprHdEo4IrE8sRho9R9HOLYA="
    ];
  };
}

D cachix/sumnerevans.nix => cachix/sumnerevans.nix +0 -11
@@ 1,11 0,0 @@

{
  nix = {
    binaryCaches = [
      "https://sumnerevans.cachix.org"
    ];
    binaryCachePublicKeys = [
      "sumnerevans.cachix.org-1:z6/iKao2dNGnmPNsnlsOCsn12LgeAVv3XneaLdyeve0="
    ];
  };
}

D configuration.nix => configuration.nix +0 -16
@@ 1,16 0,0 @@
{ config, lib, pkgs, ... }: {
  # Import a bunch of things.
  imports = [
    # Include the results of the hardware scan. This is autogenerated.
    ./hardware-configuration.nix
    ./modules
  ];

  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

D host-configurations/bespin.nix => host-configurations/bespin.nix +0 -101
@@ 1,101 0,0 @@
{ config, lib, ... }: {
  hardware.isServer = true;
  boot.loader.grub = {
    forceInstall = true;
    device = "nodev";
  };

  # Set the hostname
  networking.hostName = "bespin";
  networking.domain = "sumnerevans.com";

  # IPv6 on Linode is just bad and doesn't work.
  networking.enableIPv6 = false;

  services.openssh.enable = true;
  services.openssh.permitRootLogin = "prohibit-password";

  networking.interfaces.eth0.useDHCP = true;

  # Enable a lot of swap since we have enough disk. This way, if Airsonic eats
  # memory, it won't crash the box.
  swapDevices = [
    { device = "/var/swapfile"; size = 4096; }
  ];

  fileSystems = {
    "/" = { device = "/dev/sda"; fsType = "ext4"; };
    "/mnt/syncthing-data" = { device = "/dev/disk/by-id/scsi-0Linode_Volume_syncthing-data"; fsType = "ext4"; };
  };

  # Websites
  services.nginx.enable = true;

  # Transitional redirects
  services.nginx.virtualHosts = {
    "bitwarden.sumnerevans.com" = {
      forceSSL = true;
      enableACME = true;
      locations."/".proxyPass = "http://5.161.43.204:8222";
    };
    "dav.sumnerevans.com" = {
      forceSSL = true;
      enableACME = true;
      locations."/".proxyPass = "http://5.161.43.204:8080";
    };

    "matrix.sumnerevans.com" = {
      enableACME = true;
      forceSSL = true;

      # If they access root, redirect to Element. If they access the API, then
      # forward on to Synapse.
      locations."/".return = "301 https://app.element.io";
      locations."/_matrix" = {
        proxyPass = "http://5.161.43.204:8008"; # without a trailing /
        extraConfig = ''
          access_log /var/log/nginx/matrix.access.log;
        '';
      };
      locations."/_matrix/federation/" = {
        proxyPass = "http://5.161.43.204:8009"; # without a trailing /
        extraConfig = ''
          access_log /var/log/nginx/matrix-federation.access.log;
        '';
      };
      locations."~ ^/_matrix/client/.*/(sync|events|initialSync)" = {
        proxyPass = "http://5.161.43.204:8010"; # without a trailing /
        extraConfig = ''
          access_log /var/log/nginx/matrix-synchotron.access.log;
        '';
      };
      locations."~ ^/(_matrix/media|_synapse/admin/v1/(purge_media_cache|(room|user)/.*/media.*|media/.*|quarantine_media/.*|users/.*/media))" = {
        proxyPass = "http://5.161.43.204:8011"; # without a trailing /
        extraConfig = ''
          access_log /var/log/nginx/matrix-media-repo.access.log;
        '';
      };
    };
  };

  ############
  # Services #
  ############
  services.grafana.enable = true;
  services.logrotate.enable = true;
  services.prometheus.enable = true;
  services.syncthing.enable = true;

  services.healthcheck = {
    checkId = "43c45999-cc22-430f-a767-31a1a17c6d1b";
    disks = [ "/" "/mnt/syncthing-data" ];
  };

  # Longview
  services.longview.enable = true;
  services.longview.apiKeyFile = ../secrets/longview/bespin;

  # Restic backup
  services.backup.healthcheckId = "a42858af-a9d7-4385-b02d-2679f92873ed";
  services.backup.healthcheckPruneId = "14ed7839-784f-4dee-adf2-f9e03c2b611e";
}

D host-configurations/coruscant.nix => host-configurations/coruscant.nix +0 -24
@@ 1,24 0,0 @@
{
  # Set the hostname
  networking.hostName = "coruscant";
  hardware.isPC = true;
  hardware.ramSize = 32;
  xorg.enable = true;

  programs.steam.enable = true;

  networking.interfaces.enp37s0.useDHCP = true;
  networking.interfaces.wlp35s0.useDHCP = true;

  services.xserver.videoDrivers = [ "nvidia" ];

  # Use systemd-boot
  boot.loader.systemd-boot.enable = true;

  # Enable Docker.
  virtualisation.docker.enable = true;

  # Enable the OpenSSH daemon.
  services.openssh.enable = true;
  services.openssh.ports = [ 32 ];
}

D host-configurations/jedha.nix => host-configurations/jedha.nix +0 -25
@@ 1,25 0,0 @@
{
  # Set the hostname
  networking.hostName = "jedha";
  hardware.isPC = true;
  hardware.ramSize = 32;
  hardware.isLaptop = true;

  # Enable bumblebee.
  # hardware.bumblebee.enable = true;

  networking.interfaces.enp0s31f6.useDHCP = true;
  networking.interfaces.wlp4s0.useDHCP = true;

  wayland.enable = true;
  # xorg.enable = true;
  # xorg.xkbVariant = "3l";

  programs.steam.enable = true;

  # Enable Docker.
  virtualisation.docker.enable = true;

  # Use systemd-boot
  boot.loader.systemd-boot.enable = true;
}

D host-configurations/kessel.nix => host-configurations/kessel.nix +0 -116
@@ 1,116 0,0 @@
{ config, lib, pkgs, ... }: with lib; {
  hardware.isServer = true;

  # Set the hostname
  networking.hostName = "kessel";
  networking.domain = "nevarro.space";

  services.openssh.enable = true;
  services.openssh.permitRootLogin = "prohibit-password";

  networking.interfaces.eth0.useDHCP = true;

  # Enable a lot of swap since we have enough disk. This way, if Airsonic eats
  # memory, it won't crash the box.
  swapDevices = [
    { device = "/var/swapfile"; size = 4096; }
  ];

  fileSystems = {
    "/" = { device = "/dev/disk/by-uuid/eb9f58f4-7c21-4ddc-a2e6-c9816f01e7c8"; fsType = "ext4"; };
    "/mnt/postgresql-data" = { device = "/dev/disk/by-uuid/0a948381-d1c1-430d-ad1b-0841114a00b9"; fsType = "ext4"; };
  };

  # Allow temporary redirects directly to the reverse proxy.
  networking.firewall.allowedTCPPortRanges = [
    { from = 8008; to = 8015; }
  ];

  ############
  # Websites #
  ############
  services.nginx.enable = true;
  services.nginx.websites = [
    { hostname = "nevarro.space"; }
  ];

  ############
  # Services #
  ############
  services.grafana.enable = true;
  services.logrotate.enable = true;

  # Healthcheck
  services.healthcheck = {
    checkId = "ac320939-f60f-4675-a284-76e318080eda";
    disks = [ "/" "/mnt/postgresql-data" ];
  };

  # Heisenbridge
  services.heisenbridge = {
    enable = true;
    homeserver = "https://matrix.nevarro.space";
    identd.enable = true;
    package = pkgs.callPackage ../pkgs/heisenbridge.nix { };
  };
  systemd.services.heisenbridge = {
    before = [ "matrix-synapse.target" ]; # So the registration file can be used by Synapse
  };
  services.matrix-synapse-custom.appServiceConfigFiles = [
    "/var/lib/heisenbridge/registration.yml"
  ];

  # LinkedIn <-> Matrix Bridge
  services.linkedin-matrix = {
    enable = true;
    homeserver = "https://matrix.nevarro.space";
  } // (import ../secrets/matrix/appservices/linkedin-matrix.nix);

  # Mjolnir
  services.mjolnir.enable = true;

  # PosgreSQL
  services.postgresql.enable = true;
  services.postgresql.dataDir = "/mnt/postgresql-data/${config.services.postgresql.package.psqlSchema}";
  services.postgresqlBackup.enable = true;

  # Quotesfilebot
  services.quotesfilebot = {
    enable = true;
    homeserver = "https://matrix.nevarro.space";
    passwordFile = "/etc/nixos/secrets/matrix/bots/quotesfilebot";
  };

  # Restic backup
  services.backup.healthcheckId = "efe08f4f-c0bb-4901-967d-b33774c18d80";
  services.backup.healthcheckPruneId = "7215d3b4-24d4-4ecf-9785-6b4161b3af28";

  # Standupbot
  services.standupbot = {
    enable = true;
    homeserver = "https://matrix.nevarro.space";
    passwordFile = "/etc/nixos/secrets/matrix/bots/standupbot";
  };

  # Synapse
  services.matrix-synapse-custom = {
    enable = true;
    registrationSharedSecretFile = ../secrets/matrix/registration-shared-secret/kessel;
    sharedSecretAuthFile = ../secrets/matrix/shared-secret-auth/nevarro.space;
    emailCfg = {
      smtp_host = "smtp.migadu.com";
      smtp_port = 587;
      require_transport_security = true;

      smtp_user = "matrix@nevarro.space";
      smtp_pass = removeSuffix "\n" (readFile ../secrets/matrix/nevarro-smtp-pass);

      notif_from = "Nevarro %(app)s Admin <matrix@nevarro.space>";
      app_name = "Matrix";
      enable_notifs = true;
      notif_for_new_users = false;
      invite_client_location = "https://app.element.io";
    };
  };
  services.cleanup-synapse.environmentFile = "/etc/nixos/secrets/matrix/cleanup-synapse/kessel";
}

D host-configurations/morak.nix => host-configurations/morak.nix +0 -135
@@ 1,135 0,0 @@
{ config, lib, ... }: {
  hardware.isServer = true;

  # Set the hostname
  networking.hostName = "morak";
  networking.domain = "sumnerevans.com";

  services.openssh.enable = true;
  services.openssh.permitRootLogin = "prohibit-password";

  networking.interfaces.eth0.useDHCP = true;

  # Enable a lot of swap since we have enough disk. This way, if Airsonic eats
  # memory, it won't crash the box.
  swapDevices = [
    { device = "/var/swapfile"; size = 4096; }
  ];

  fileSystems = {
    "/" = { device = "/dev/disk/by-uuid/78831675-9f80-462b-b9fc-75a0efa368e5"; fsType = "ext4"; };
    "/mnt/syncthing-data" = { device = "/dev/disk/by-uuid/930c8bdb-7b71-4bdf-b478-6e85218cad37"; fsType = "ext4"; };
    "/mnt/postgresql-data" = { device = "/dev/disk/by-uuid/3d8eb9ca-e8ea-4231-b2a6-4fc5367ccb8a"; fsType = "ext4"; };
  };

  # Allow temporary redirects directly to the reverse proxy.
  networking.firewall.allowedTCPPorts = [ 8222 8080 ];
  networking.firewall.allowedTCPPortRanges = [
    { from = 8008; to = 8015; }
  ];

  ############
  # Websites #
  ############
  services.nginx.enable = true;
  services.nginx.websites = [
    { hostname = "the-evans.family"; }
    { hostname = "qs.sumnerevans.com"; }
    {
      # sumnerevans.com
      hostname = "sumnerevans.com";
      extraLocations = {
        "/teaching" = {
          root = "/var/www";
          priority = 0;
          extraConfig = ''
            access_log /var/log/nginx/sumnerevans.com.access.log;
            autoindex on;
          '';
        };
      };
      excludeTerms = [
        "/.well-known/"
        "/dark-theme.min.js"
        "/favicon.ico"
        "/js/isso.min.js"
        "/profile.jpg"
        "/robots.txt"
        "/style.css"
        "/teaching/csci564-s21/_static/"
      ];
    }
  ];

  # Host reverse proxy services
  services.nginx.virtualHosts."tunnel.sumnerevans.com" = {
    addSSL = true;
    enableACME = true;

    extraConfig = ''
      error_page 502 /50x.html;
    '';

    locations = {
      "/50x.html".root = "/usr/share/nginx/html";
      "/".proxyPass = "http://localhost:1337/";
    };
  };

  ############
  # Services #
  ############
  services.airsonic.enable = true;
  services.grafana.enable = true;
  services.isso.enable = true;
  services.logrotate.enable = true;
  services.syncthing.enable = true;
  services.vaultwarden.enable = true;
  services.xandikos.enable = true;

  # Gonic
  services.gonic = {
    enable = true;
    scanInterval = 1;
    virtualHost = "music.sumnerevans.com";
    musicDir = "/mnt/syncthing-data/Music";
  };
  services.nginx.virtualHosts."music.sumnerevans.com" = {
    forceSSL = true;
    enableACME = true;
  };

  services.healthcheck = {
    checkId = "e1acf12a-ebc8-456a-aac8-96336e14d974";
    disks = [ "/" "/mnt/syncthing-data" "/mnt/postgresql-data" ];
  };

  # Mumble
  services.murmur.enable = true;

  # PosgreSQL
  services.postgresql.enable = true;
  services.postgresql.dataDir = "/mnt/postgresql-data/${config.services.postgresql.package.psqlSchema}";
  services.postgresqlBackup.enable = true;

  # PR Tracker
  # services.pr-tracker = {
  #   enable = true;
  #   githubApiTokenFile = "/etc/nixos/secrets/pr-tracker-github-token";
  #   sourceUrl = "https://git.sr.ht/~sumner/pr-tracker";
  # };

  # Restic backup
  services.backup.healthcheckId = "6c9caf62-4f7b-4ef7-82ac-d858d3bcbcb5";
  services.backup.healthcheckPruneId = "f90ed04a-2596-49d0-a89d-764780a27fc6";

  # Synapse
  services.matrix-synapse-custom.enable = true;
  services.matrix-synapse-custom.registrationSharedSecretFile = ../secrets/matrix/registration-shared-secret/morak;
  services.cleanup-synapse.environmentFile = "/etc/nixos/secrets/matrix/cleanup-synapse/morak";
  services.matrix-vacation-responder = {
    enable = true;
    username = "@sumner:sumnerevans.com";
    homeserver = "https://matrix.sumnerevans.com";
  };
}

D host-configurations/mustafar/default.nix => host-configurations/mustafar/default.nix +0 -102
@@ 1,102 0,0 @@
{ lib, pkgs, ... }: with pkgs; let
  sof-firmware = callPackage ./intel-sof-firmware.nix { };
  logrotateLib = import ../../lib/logrotate.nix;
in
{
  # Set the hostname
  networking.hostName = "mustafar";
  hardware.isPC = true;
  hardware.ramSize = 8;
  hardware.isLaptop = true;
  wayland.enable = true;

  nixpkgs.overlays = [
    # sof-firmware so sleep works on Kohaku
    (
      self: super: {
        sof-firmware = super.sof-firmware.overrideAttrs (
          old: rec {
            version = "1.5.1";
            src = super.fetchFromGitHub {
              owner = "thesofproject";
              repo = "sof-bin";
              rev = "ae61d2778b0a0f47461a52da0d1f191f651e0763";
              sha256 = "0j6bpwz49skvdvian46valjw4anwlrnkq703n0snkbngmq78prba";
            };

            installPhase = ''
              mkdir -p $out/lib/firmware/intel
              sed -i 's/ROOT=.*$/ROOT=$out/g' go.sh
              sed -i 's/VERSION=.*$/VERSION=v${version}/g' go.sh
              ./go.sh
            '';
          }
        );
      }
    )
  ];
  nix.enableRemoteBuildOnCoruscant = true;
  nix.enableRemoteBuildOnTatooine = true;

  virtualisation.docker.enable = true;

  # Get sound working
  # hardware.firmware = [ sof-firmware ];
  hardware.enableAllFirmware = true;
  hardware.enableRedistributableFirmware = true;
  boot.blacklistedKernelModules = [ "snd_hda_intel" "snd_soc_skl" ];

  # Use systemd-boot
  boot.loader.systemd-boot.enable = true;
  boot.kernelPackages = linuxPackages_5_10;
  boot.kernelPatches = [
    {
      name = "chromebook-config";
      patch = null;
      extraConfig = ''
        CHROMEOS_LAPTOP m
        CHROMEOS_PSTORE m
        CHROME_PLATFORMS y
        CROS_EC m
        CROS_EC_CHARDEV m
        CROS_EC_DEBUGFS m
        CROS_EC_I2C m
        CROS_EC_LPC m
        CROS_EC_SENSORHUB m
        CROS_EC_SPI m
        CROS_EC_SYSFS m
        CROS_EC_TYPEC m
        CROS_USBPD_LOGGER m
        EXTCON_USBC_CROS_EC m
        I2C_CROS_EC_TUNNEL m
        IIO_CROS_EC_ACCEL_LEGACY m
        IIO_CROS_EC_BARO m
        IIO_CROS_EC_LIGHT_PROX m
        IIO_CROS_EC_SENSORS m
        IIO_CROS_EC_SENSORS_CORE m
        IIO_CROS_EC_SENSORS_LID_ANGLE m
        KEYBOARD_CROS_EC m
        RTC_DRV_CROS_EC m
        SND_SOC_CROS_EC_CODEC m
      '';
    }
  ];

  # Orientation and ambient light
  hardware.sensor.iio.enable = true;

  # Set up networking.
  networking.interfaces.wlp0s20f3.useDHCP = true;

  # high-resolution display
  hardware.video.hidpi.enable = lib.mkDefault true;

  # Intel's libva driver
  hardware.opengl.extraPackages = [
    intel-media-driver
    vaapiVdpau
    libvdpau-va-gl
    intel-ocl
  ];
  environment.systemPackages = [ libva-utils ];
}

D host-configurations/mustafar/intel-sof-firmware.nix => host-configurations/mustafar/intel-sof-firmware.nix +0 -15
@@ 1,15 0,0 @@
{ lib, fetchurl }: with lib;
stdenv.mkDerivation rec {
  pname = "intel-sof-firmware";
  version = "1.5.0";

  src = ./sof-topology-hatch-1.5.tar.xz;

  phases = [ "unpackPhase" "installPhase" ];

  installPhase = ''
    mkdir -p $out/lib/firmware/intel/sof-tplg

    cp * $out/lib/firmware/intel/sof-tplg
  '';
}

D host-configurations/mustafar/sof-topology-hatch-1.5.tar.xz => host-configurations/mustafar/sof-topology-hatch-1.5.tar.xz +0 -0
D host-configurations/tatooine.nix => host-configurations/tatooine.nix +0 -31
@@ 1,31 0,0 @@
{
  # Set the hostname
  networking.hostName = "tatooine";
  hardware.ramSize = 8;

  networking.interfaces.eth0.useDHCP = true;

  # Enable a lot of swap since we have enough disk. This way, if Airsonic eats
  # memory, it won't crash the box.
  swapDevices = [
    { device = "/var/swapfile"; size = 4096; }
  ];

  fileSystems = {
    "/" = { device = "/dev/disk/by-uuid/b477c98a-376a-4dd8-a46c-03e3187188d8"; fsType = "ext4"; };
  };

  # Enable the OpenSSH daemon.
  services.openssh.enable = true;

  # Enable Docker.
  virtualisation.docker.enable = true;

  # Allow the Syncthing GUI through
  networking.firewall.allowedTCPPorts = [ 8384 2022 ];
  networking.firewall.allowedUDPPorts = [ 8384 2022 ];

  # Enable mosh and et
  programs.mosh.enable = true;
  services.eternal-terminal.enable = true;
}

D modules/beeper.nix => modules/beeper.nix +0 -6
@@ 1,6 0,0 @@
{
  # Add the Beeper stack cert to the list of certs
  security.pki.certificateFiles = [
    ../secrets/beeper-localenv-root-ca.pem
  ];
}

D modules/default.nix => modules/default.nix +0 -15
@@ 1,15 0,0 @@
#
# Contains modules for configuring systems.
#
{ pkgs, ... }: {
  imports = [
    ./hardware
    ./programs
    ./services
    ./users

    ./beeper.nix
    ./nix.nix
    ./time.nix
  ];
}

D modules/hardware/bluetooth.nix => modules/hardware/bluetooth.nix +0 -11
@@ 1,11 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.hardware.bluetooth;
in
{
  config = mkIf cfg.enable {
    services.blueman.enable = true;

    # Use the full pulseaudio that includes Bluetooth support.
    hardware.pulseaudio.package = pkgs.pulseaudioFull;
  };
}

D modules/hardware/bootloader.nix => modules/hardware/bootloader.nix +0 -8
@@ 1,8 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  bootloaderCfg = config.boot.loader;
in
{
  boot.cleanTmpDir = true;
  boot.loader.grub.devices = [ "/dev/sda" ];
  boot.loader.efi.canTouchEfiVariables = true;
}

D modules/hardware/default.nix => modules/hardware/default.nix +0 -96
@@ 1,96 0,0 @@
#
# Contains convenience modules for configuring the hardware.
#
{ config, pkgs, lib, ... }: with lib; let
  cfg = config.hardware;
in
{
  imports = [
    ./bluetooth.nix
    ./bootloader.nix
    ./firewall.nix
    ./laptop.nix
    ./networking.nix
    ./tmpfs.nix
    ./v4l2loopback.nix
  ];

  options = {
    hardware.isPC = mkEnableOption "PC mode";
    hardware.isServer = mkEnableOption "server mode";
  };

  config = mkMerge [
    {
      assertions = [
        {
          assertion = cfg.isPC -> !cfg.isServer && cfg.isServer -> !cfg.isPC;
          message = "isPC and isServer are mutually exclusive";
        }
      ];

      boot.kernel.sysctl."fs.inotify.max_user_instances" = 524288;
      boot.kernel.sysctl."fs.inotify.max_user_watches" = 524288;
    }

    (
      mkIf cfg.isPC {
        boot.loader.systemd-boot.enable = true;
        hardware.bluetooth.enable = true;
        networking.networkmanager.enable = true;

        # TODO fix this
        networking.firewall.enable = false;

        # Enable sound.
        hardware.pulseaudio.enable = true;
        hardware.pulseaudio.support32Bit = true;

        # Pipewire
        services.pipewire = {
          enable = true;
          alsa.enable = true;
          jack.enable = true;
          # pulse.enable = true;
        };

        # Suspend on power button press instead of shutdown.
        services.logind.extraConfig = ''
          HandlePowerKey=suspend
        '';

        # Enable Flatpak.
        services.flatpak.enable = true;
        xdg.portal.enable = true;

        # Enable YubiKey smart card mode.
        services.pcscd.enable = true;
      }
    )

    (
      mkIf cfg.isServer {
        services.healthcheck.enable = true;
        boot.loader.timeout = 10;
        system.autoUpgrade = {
          enable = true;
          dates = "monthly";
          channel = https://nixos.org/channels/nixos-unstable-small;
          allowReboot = true;
        };
        nix.gc.automatic = true;

        services.openssh.enable = true;
        services.openssh.permitRootLogin = "prohibit-password";

        # Enable LISH
        boot.kernelParams = [ "console=ttyS0,19200n8" ];
        boot.loader.grub.extraConfig = ''
          serial --speed=19200 --unit=0 --word=8 --party=no --stop=1;
          terminal_input serial;
          terminal_output serial;
        '';
      }
    )
  ];
}

D modules/hardware/firewall.nix => modules/hardware/firewall.nix +0 -4
@@ 1,4 0,0 @@
{ config, lib, pkgs, ... }: with lib;
mkIf config.networking.firewall.enable {
  networking.firewall.allowPing = true;
}

D modules/hardware/laptop.nix => modules/hardware/laptop.nix +0 -16
@@ 1,16 0,0 @@
{ config, lib, ... }: with lib; let
  cfg = config.hardware;
in
{
  options = {
    hardware.isLaptop = mkEnableOption "laptop-only configurations";
  };

  config = {
    # Enable powertop for power management.
    powerManagement.powertop.enable = cfg.isLaptop;

    # UPower
    services.upower.enable = true;
  };
}

D modules/hardware/networking.nix => modules/hardware/networking.nix +0 -33
@@ 1,33 0,0 @@
{ config, lib, pkgs, ... }: with lib; mkMerge [
  {
    # The global useDHCP flag is deprecated, therefore explicitly set to false
    # here. Per-interface useDHCP will be mandatory in the future, so this
    # generated config replicates the default behaviour.
    networking.useDHCP = false;
  }

  # If NetworkManager is enabled, then also enable strong swan integration.
  (
    mkIf config.networking.networkmanager.enable {
      networking.networkmanager.enableStrongSwan = true;

      services.globalprotect = {
        enable = true;
        # if you need a Host Integrity Protection report
        csdWrapper = "${pkgs.openconnect}/libexec/openconnect/hipreport.sh";
      };

      environment.systemPackages = [ pkgs.globalprotect-openconnect ];
    }
  )

  (
    mkIf (!config.networking.networkmanager.enable) {
      networking.usePredictableInterfaceNames = false;

      services.unbound = {
        enable = true;
      };
    }
  )
]

D modules/hardware/tmpfs.nix => modules/hardware/tmpfs.nix +0 -24
@@ 1,24 0,0 @@
{ config, lib, ... }: with lib; let
  cfg = config.hardware;
in
{
  options = {
    hardware.ramSize = mkOption {
      type = types.int;
      description = "How much RAM the hardware has.";
    };
  };

  config.fileSystems = mkIf cfg.isPC {
    # Temporary in-RAM Filesystems.
    "/home/sumner/tmp" = {
      fsType = "tmpfs";
      options = [ "nosuid" "nodev" "size=${toString cfg.ramSize}G" ];
    };

    "/home/sumner/.cache" = {
      fsType = "tmpfs";
      options = [ "nosuid" "nodev" "size=${toString cfg.ramSize}G" ];
    };
  };
}

D modules/hardware/v4l2loopback.nix => modules/hardware/v4l2loopback.nix +0 -23
@@ 1,23 0,0 @@
{ config, lib, ... }: with lib; let
  cfg = config.hardware;
in
{
  options = {
    hardware.enableVideoLoopback = mkOption {
      type = types.bool;
      description = "Enable v4l2loopback for loopback from OBS.";
      default = false;
    };
  };

  config = mkIf cfg.enableVideoLoopback {
    # For piping video capture of the screen back to a video output.
    boot.kernelModules = [ "v4l2loopback" ];
    boot.extraModulePackages = with config.boot.kernelPackages; [
      v4l2loopback
    ];
    environment.etc."modprobe.d/v4l2loopback.conf".text = ''
      options v4l2loopback exclusive_caps=1 video_nr=10 card_label="OBS Virtual Output"
    '';
  };
}

D modules/nix.nix => modules/nix.nix +0 -138
@@ 1,138 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  nixCfg = config.nix;
in
{
  options = {
    nix.enableRemoteBuildOnCoruscant = mkEnableOption "Enable remote builds on coruscant";
    nix.enableRemoteBuildOnTatooine = mkEnableOption "Enable remote builds on tatooine";
  };

  config = mkMerge [
    # Allow unfree software.
    {
      nixpkgs.config.allowUnfree = true;
      environment.variables.NIXPKGS_ALLOW_UNFREE = "1";

      nix.settings.trusted-substituters = [
        "https://sumnerevans.cachix.org"
        "https://nixpkgs-wayland.cachix.org"
      ];
    }

    # If automatic garbage collection is enabled, delete 30 days.
    (
      mkIf nixCfg.gc.automatic {
        nix.gc.options = "--delete-older-than 30d";
      }
    )

    # Use nix flakes
    {
      # https://github.com/nix-community/nix-direnv#via-configurationnix-in-nixos
      # Persist direnv derivations across garbage collections.
      nix.extraOptions = ''
        experimental-features = nix-command flakes
      '';
      nix.package = pkgs.nixUnstable;
    }

    # nix-direnv
    {
      # https://github.com/nix-community/nix-direnv#via-configurationnix-in-nixos
      # Persist direnv derivations across garbage collections.
      nix.extraOptions = ''
        keep-outputs = true
        keep-derivations = true
      '';
      environment.pathsToLink = [ "/share/nix-direnv" ];
    }

    # Allow builds to happen on coruscant
    (
      mkIf nixCfg.enableRemoteBuildOnCoruscant {
        nix = {
          buildMachines = [
            {
              hostName = "coruscant";
              system = "x86_64-linux";
              maxJobs = 1;
              speedFactor = 2;
              supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
              mandatoryFeatures = [ ];
            }
            {
              hostName = "coruscant-lan";
              system = "x86_64-linux";
              maxJobs = 1;
              speedFactor = 2;
              supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
              mandatoryFeatures = [ ];
            }
          ];
          distributedBuilds = true;
          extraOptions = ''
            builders-use-substitutes = true
          '';
        };
      }
    )

    # Allow builds to happen on tatooine
    (
      mkIf nixCfg.enableRemoteBuildOnTatooine {
        nix = {
          buildMachines = [
            {
              hostName = "tatooine";
              system = "x86_64-linux";
              maxJobs = 4;
              speedFactor = 2;
              supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
              mandatoryFeatures = [ ];
            }
          ];
          distributedBuilds = true;
          extraOptions = ''
            builders-use-substitutes = true
          '';
        };
      }
    )

    (
      {
        programs.ssh =
          let
            coruscantPublicIp = lib.removeSuffix "\n" (builtins.readFile ../secrets/coruscant-ip);
          in
          {
            extraConfig = ''
              Host coruscant
                  IdentityFile /etc/nixos/secrets/nix-remote-build
                  HostName ${coruscantPublicIp}
                  Port 32

              Host coruscant-lan
                  IdentityFile /etc/nixos/secrets/nix-remote-build
                  HostName 192.168.0.14
                  Port 32

              Host tatooine
                  IdentityFile /etc/nixos/secrets/nix-remote-build
                  HostName 5.161.50.43
            '';
            knownHosts = {
              tatooine = {
                extraHostNames = [ "tatooine.sumnerevans.com" ];
                publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3oHcGiwPtWbee1x+6rKdovw4/CNIyE6MbBqC+irqZnyBLchboLKF+n9Vw9XRZxBPHppcb57oUTjh4gFA8N2vKqjVIacMNHSGFhRXBfUYtaTnmhzNj8sFWPwWpYAneTEe0hFdDKhL63nHZsi3XySh7R+BEIFZrDeyvKH86/GRpQwepVpQV3giqtqDA4GVgla/Zcea5ES1uxEolgDQKszXv8Z8iRUnrohrSAgsanjw6B+41X4qrwVnsStYhVN42tT8I7BM6kko9bdsLf4bg/WqdYDwPA4cbg1RkppqI0k7eBXPNfyaUKquiWz6tmrX5IMeIejjV+2BHgu0Q0iweMtPy41DGX6MaaKawWx5hoLds8fszVK02GUoCee26B8oEX+3TGKF9gj62gDcBOEmjLaGjxFrnk/DEkm3zSahwaIjxsbLK0/tFLh5B9Bha5mNF7tU88JwwJl+Zh3R7vGzHTqfZ7XVvSVSfpOPpVm0q3RSHMvVPSulOI+pTbA6GAQn0dT8= sumner@tatooine";
              };
              coruscant = {
                extraHostNames = [ "192.168.0.14" coruscantPublicIp ];
                publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDcO1lMaMPbL2cr4XdKc6bQJIbQylIXaYfX0S+NN3z0AMw3HCfsNCwlWoxyjIbZBlP3aSrdTITq3eB0gw3l25029h3Q4Dve+I2hf6jpltaGVlpsyhMN8xu9yoqadd0cG71kn6Wn5/BlpaWZtrJy7Px9luCyeuDx+vkC05CLb28sjwYVdTzbuePygUONL7cH6Xd2ulLDW+dFoZIHwraEsqHk9AQRV3f2hokxG/VpbxbVAY7XNOkIrsfmX6y4IccUddffgs8uqsObHEWniPdWOcEocRJ4exORBoyS5SXvcHzUtGi8Q0jGPfKkSFPEYUNcgw0QlU4dzrT/xqm0COcOoXKK58+tZH/YMu0bshp+vIK3HDCCfcRtuv1ZMF/AFbHdY3fglUu3YK2Jpm5Vr8KzljqQXW3ekboILxZpuP2LA3YErS1lpaj3sbOlsfxNQhG7V8/gqo1PBQ4w//7wlav0TOY5GZD1Tw2lduaSAFuFHxVGBOy4Xu31mxa2Qej5YKc71VU= sumner@coruscant-nixos";
              };
            };
          };
      }
    )
  ];
}

D modules/programs/default.nix => modules/programs/default.nix +0 -43
@@ 1,43 0,0 @@
{ pkgs, ... }: {
  imports = [
    ./tmux.nix
  ];

  # Environment variables
  environment.homeBinInPath = true;

  # Minimal package set to install on all machines.
  environment.systemPackages = with pkgs; [
    bind
    direnv
    fd
    git
    git-crypt
    gnupg
    htop
    iftop
    inetutils
    lm_sensors
    mtr
    neovim
    nix-direnv
    openssl
    restic
    ripgrep
    rsync
    sysstat
    tmux
    tree
    unzip
    vim
    wireguard-tools
    zsh
  ];

  # Automatically start an SSH agent.
  programs.ssh.startAgent = true;

  # Enable ZSH for the command-not-found functionality
  programs.zsh.enable = true;
  environment.pathsToLink = [ "/share/zsh" ];
}

D modules/programs/tmux.nix => modules/programs/tmux.nix +0 -23
@@ 1,23 0,0 @@
{ pkgs, ... }: {
  programs.tmux = {
    enable = true;
    escapeTime = 0;
    extraConfig = ''
      set -g default-shell ${pkgs.zsh}/bin/zsh

      # Use Alt-HJKL to move around between vim panes and tmux windows.
      is_vim="ps -o state= -o comm= -t '#{pane_tty}' \
          | grep -iqE '^[^TXZ ]+ +(\\S+\\/)?g?(view|n?vim?x?)(diff)?$'"
      bind -n M-h if-shell "$is_vim" 'send-keys M-h' 'select-pane -L'
      bind -n M-j if-shell "$is_vim" 'send-keys M-j' 'select-pane -D'
      bind -n M-k if-shell "$is_vim" 'send-keys M-k' 'select-pane -U'
      bind -n M-l if-shell "$is_vim" 'send-keys M-l' 'select-pane -R'

      # Open a new window with Alt-Enter
      bind -n M-Enter split-window -h

      # Use the mouse
      set -g mouse on
    '';
  };
}

D modules/services/acme.nix => modules/services/acme.nix +0 -4
@@ 1,4 0,0 @@
{
  security.acme.defaults.email = "admin@sumnerevans.com";
  security.acme.acceptTerms = true;
}

D modules/services/airsonic.nix => modules/services/airsonic.nix +0 -28
@@ 1,28 0,0 @@
{ config, lib, ... }:
let
  serverName = "airsonic.${config.networking.domain}";
  airsonicCfg = config.services.airsonic;
in
lib.mkIf airsonicCfg.enable {
  # Create the airsonic service.
  services.airsonic = {
    maxMemory = 1024;
    virtualHost = serverName;
  };

  users.groups.music = { };
  systemd.services.airsonic.serviceConfig.Group = "music";
  users.users.airsonic.extraGroups = [ "music" ];

  services.nginx.virtualHosts = {
    ${serverName} = {
      forceSSL = true;
      enableACME = true;
    };
  };

  # Add a backup service.
  services.backup.backups.airsonic = {
    path = config.users.users.airsonic.home;
  };
}

D modules/services/bitwarden.nix => modules/services/bitwarden.nix +0 -35
@@ 1,35 0,0 @@
{ config, lib, ... }:
let
  serverName = "bitwarden.${config.networking.domain}";
  bitwardenCfg = config.services.vaultwarden;
in
lib.mkIf bitwardenCfg.enable {
  services.vaultwarden = {
    config = {
      domain = "https://${serverName}";
      rocketAddress = "0.0.0.0";
      rocketLog = "critical";
      rocketPort = 8222;
      signupsAllowed = false;
      websocketAddress = "0.0.0.0";
      websocketEnabled = true;
      websocketPort = 3012;
    };
  };

  # Reverse proxy Bitwarden.
  services.nginx.virtualHosts."${serverName}" = {
    forceSSL = true;
    enableACME = true;
    locations = {
      "/".proxyPass = "http://127.0.0.1:8222";
      "/notifications/hub".proxyPass = "http://127.0.0.1:3012";
      "/notifications/hub/negotiate".proxyPass = "http://127.0.0.1:8222";
    };
  };

  # Add a backup service.
  services.backup.backups.bitwarden = {
    path = "/var/lib/bitwarden_rs";
  };
}

D modules/services/default.nix => modules/services/default.nix +0 -31
@@ 1,31 0,0 @@
{ config, pkgs, ... }:
{
  imports = [
    ./matrix
    ./gui

    ./acme.nix
    ./airsonic.nix
    ./bitwarden.nix
    ./docker.nix
    ./goaccess.nix
    ./gonic.nix
    ./grafana.nix
    ./healthcheck.nix
    ./isso.nix
    ./journald.nix
    ./longview.nix
    ./mumble.nix
    ./nginx.nix
    ./postgresql.nix
    ./pr-tracker.nix
    ./restic.nix
    ./sshd.nix
    ./syncthing.nix
    ./xandikos.nix
  ];

  # Enable Redis and PostgreSQL
  services.redis.enable = true;
  services.postgresql.enable = true;
}

D modules/services/docker.nix => modules/services/docker.nix +0 -9
@@ 1,9 0,0 @@
{ config, lib, pkgs, ... }:
let
  dockerCfg = config.virtualisation.docker;
in
lib.mkIf dockerCfg.enable {
  environment.systemPackages = [
    pkgs.docker-compose
  ];
}

D modules/services/goaccess.nix => modules/services/goaccess.nix +0 -166
@@ 1,166 0,0 @@
# Generate static GoAccess HTML reports from nginx access logs for a
# configurable list of websites, publish them under /metrics/ on this host's
# own virtual host, and refresh them on a systemd timer.
{ config, lib, options, pkgs, ... }: with lib; let
  cfg = config.services.metrics;
  hostnameDomain = "${config.networking.hostName}.${config.networking.domain}";
  goaccessDir = "/var/www/goaccess";
  # Requests from these client IPs are excluded from every report.
  excludeIPs = [
    "184.96.89.215"
    "184.96.97.165"
    "63.239.147.18"
  ];

  # Shell fragment rendering `infile` (or stdin when "-") into the HTML
  # report `outfile`, using GoAccess's COMBINED log format.
  goaccessCmd = infile: outfile: ''
    ${pkgs.goaccess}/bin/goaccess ${infile} \
      -o ${outfile} \
      --ignore-crawlers \
      ${concatMapStringsSep " " (e: "-e \"${e}\"") excludeIPs} \
      --real-os \
      --log-format=COMBINED
    '';

  # Per-day report: filter the combined log down to day ($DATE - n) and
  # render it to days/<date>.html for that host.
  # NOTE(review): $DATE is never assigned in the generated script; with it
  # empty, GNU date parses "- N day" relative to now — confirm intended.
  goaccessWebsiteMetricsForDayScriptPart = infile: hostname: n: ''
    # Output log for day $DATE - ${toString n}
    logdatefmt=$(${pkgs.coreutils}/bin/date +%d/%b/%Y -d "$DATE - ${toString n} day")
    outdatefmt=$(${pkgs.coreutils}/bin/date +%Y-%m-%d -d "$DATE - ${toString n} day")
    ${pkgs.coreutils}/bin/cat ${infile} |
      ${pkgs.gnugrep}/bin/grep "\[$logdatefmt" |
      ${goaccessCmd "-" "${goaccessDir}/${hostname}/days/$outdatefmt.html"}
  '';

  # Append "| cmd" only when `condition` holds.
  pipeIf = condition: cmd: if condition then "| ${cmd}" else "";

  # Full report script for one website: merge rotated (gzipped) and current
  # logs, optionally drop excluded terms, then build the all-time and
  # per-day reports, and prune per-day reports older than a month.
  goaccessWebsiteMetricsScript = { hostname, excludeTerms, ... }:
    pkgs.writeShellScript "goaccess-${hostname}" ''
      set -xef
      cd /var/log/nginx
      mkdir -p ${goaccessDir}/${hostname}/days

      logtmp=$(${pkgs.coreutils}/bin/mktemp)
      trap "rm -rfv $logtmp" EXIT

      # Combine the gzipped and non-gziped logs together
      ${pkgs.coreutils}/bin/cat \
        <(${pkgs.findutils}/bin/find . -regextype awk -regex "./${hostname}.access.log.[0-9]+.gz" |
            ${pkgs.findutils}/bin/xargs ${pkgs.gzip}/bin/zcat -fq) \
        <(${pkgs.findutils}/bin/find . -regextype awk -regex "./${hostname}.access.log(\.[0-9]+)?" |
            ${pkgs.findutils}/bin/xargs ${pkgs.coreutils}/bin/cat) \
            ${pipeIf (excludeTerms != [])
      "${pkgs.gnugrep}/bin/grep -v ${concatMapStringsSep " " (e: "-e \"${e}\"") excludeTerms}"} \
        > $logtmp

      # Run Goaccess for all of the logs that we have.
      ${goaccessCmd "$logtmp" "${goaccessDir}/${hostname}/index.html"}

      # Run Goaccess for the past week days as well.
      ${concatMapStringsSep "\n" (goaccessWebsiteMetricsForDayScriptPart "$logtmp" hostname) (range 0 7)}

      # Clean-up days older than a month.
      ${pkgs.findutils}/bin/find ${goaccessDir}/${hostname}/days -mtime +30 -delete
    '';

  # One <li> entry on the index page per website.
  hostListItem = { hostname, ... }: ''
    echo "
        <li>
          <a href=\"/metrics/${hostname}\">${hostname}</a>
          (<a href=\"/metrics/${hostname}/days\">Per Day</a>)
        </li>" >> ${goaccessDir}/index.html
  '';

  # Build the top-level index.html linking to every website's reports.
  goaccessScript = websites: pkgs.writeShellScript "goaccess" ''
    set -xe
    cd /var/log/nginx
    ${pkgs.coreutils}/bin/mkdir -p ${goaccessDir}

    echo "<html>"                                > ${goaccessDir}/index.html
    echo "<head><title>Metrics</title></head>"  >> ${goaccessDir}/index.html
    echo "<body>"                               >> ${goaccessDir}/index.html
    echo "<h1>Metrics</h1>"                     >> ${goaccessDir}/index.html
    echo "<ul>"                                 >> ${goaccessDir}/index.html

    ${concatMapStringsSep "\n" hostListItem websites}

    echo "</ul>"                                >> ${goaccessDir}/index.html
    echo "</body>"                              >> ${goaccessDir}/index.html
  '';
in
{
  options =
    let
      websiteOpts = { ... }: {
        options = {
          hostname = mkOption {
            type = types.str;
            description = "Website name";
          };
          extraLocations = mkOption {
            type = types.attrsOf (types.submodule options.services.nginx.virtualHosts.locations.type);
            description = "Extra nginx locations to add for this website.";
            # BUG FIX: the default must be an attribute set to match the
            # attrsOf type above; it was previously the empty list [].
            # NOTE(review): this option is declared but never referenced in
            # this module's config section.
            default = { };
          };
          excludeTerms = mkOption {
            type = types.listOf types.str;
            description = "Exclude patterns for metrics.";
            default = [];
          };
        };
      };
    in
      {
        services.metrics = {
          websites = mkOption {
            type = with types; listOf (submodule websiteOpts);
            description = ''
              A list of websites to create metrics for.
            '';
            default = [];
          };
        };
      };

  config = mkIf (cfg.websites != []) {
    # One report-generation service per website, plus one for the index page.
    systemd.services =
      let
        mkGoaccessService = website: {
          name = "goaccess-${website.hostname}";
          value = {
            description = "Goaccess web log report for ${website.hostname}.";
            serviceConfig = {
              User = "root";
              ExecStart = "${goaccessWebsiteMetricsScript website}";
            };
          };
        };
      in
        listToAttrs (map mkGoaccessService cfg.websites) // {
          goaccess-index = {
            description = "Generate Goaccess index.";
            serviceConfig = {
              User = "root";
              ExecStart = "${goaccessScript cfg.websites}";
              Type = "oneshot";
            };
          };
        };

    # Regenerate each website's report every 15 minutes.
    systemd.timers = let
      mkGoaccessTimer = website: {
        name = "goaccess-${website.hostname}";
        value = {
          description = "Goaccess timer for ${website.hostname}";
          wantedBy = [ "timers.target" ];
          timerConfig.OnCalendar = "*:0/15";
        };
      };
    in
      listToAttrs (map mkGoaccessTimer cfg.websites);

    # Set up nginx to forward requests properly.
    services.nginx.virtualHosts.${hostnameDomain} = {
      locations."/metrics/" = {
        alias = "${goaccessDir}/";
        extraConfig = ''
          autoindex on;
        '';
      };
    };
  };
}

D modules/services/gonic.nix => modules/services/gonic.nix +0 -116
@@ 1,116 0,0 @@
# NixOS module for Gonic, a Subsonic-compatible music streaming server.
# Declares the `services.gonic` options and, when enabled, sets up the
# systemd service, an optional nginx reverse proxy, and a dedicated user.
{ config, lib, pkgs, ... }: with lib;
let
  cfg = config.services.gonic;
in
{
  options.services.gonic = {
    enable = mkEnableOption "gonic, a Subsonic compatible music streaming server";
    home = mkOption {
      type = types.path;
      description = "The root directory for Gonic data.";
      default = "/var/lib/gonic";
    };
    virtualHost = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = ''
        Name of the nginx virtualhost to use and setup. If null, do not setup any virtualhost.
      '';
    };
    musicDir = mkOption {
      type = types.path;
      description = "The path to the music directory";
      default = "/var/lib/gonic/music";
    };
    podcastPath = mkOption {
      type = types.path;
      description = "The path to the podcast directory";
      default = "/var/lib/gonic/podcasts";
    };
    cachePath = mkOption {
      type = types.path;
      description = "The path to the cache directory";
      default = "/var/lib/gonic/cache";
    };
    dbPath = mkOption {
      type = types.path;
      description = "The path to the Gonic database file.";
      default = "/var/lib/gonic/gonic.db";
    };
    listenAddress = mkOption {
      type = types.str;
      description = "The host and port to listen on";
      default = "0.0.0.0:4747";
    };
    proxyPrefix = mkOption {
      type = types.str;
      description = "url path prefix to use if behind reverse proxy";
      default = "/";
    };
    scanInterval = mkOption {
      type = types.nullOr types.int;
      description = "interval (in minutes) to check for new music (automatic scanning disabled if null)";
      default = null;
    };
    jukeboxEnabled = mkOption {
      type = types.bool;
      description = "whether the subsonic jukebox api should be enabled";
      default = false;
    };
    genreSplit = mkOption {
      type = types.nullOr types.str;
      description = "a string or character to split genre tags on for multi-genre support";
      default = null;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.gonic = {
      description = "Gonic service";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      # Gonic is configured entirely via GONIC_* environment variables.
      environment = {
        GONIC_MUSIC_PATH = cfg.musicDir;
        GONIC_PODCAST_PATH = cfg.podcastPath;
        GONIC_CACHE_PATH = cfg.cachePath;
        GONIC_DB_PATH = cfg.dbPath;
        GONIC_LISTEN_ADDR = cfg.listenAddress;
        GONIC_PROXY_PREFIX = cfg.proxyPrefix;
        # NOTE(review): `toString null` is "" and `toString false` is "" —
        # presumably Gonic treats an empty value as "disabled"; confirm.
        GONIC_SCAN_INTERVAL = toString cfg.scanInterval;
        GONIC_JUKEBOX_ENABLED = toString cfg.jukeboxEnabled;
        GONIC_GENRE_SPLIT = cfg.genreSplit;
      };
      # Make sure the data directories exist before Gonic starts.
      preStart = ''
        mkdir -p ${cfg.musicDir}
        mkdir -p ${cfg.podcastPath}
        mkdir -p ${cfg.cachePath}
      '';
      serviceConfig = {
        ExecStart = "${pkgs.gonic}/bin/gonic";
        TimeoutSec = 10;
        Restart = "always";
        User = "gonic";
        Group = "music";
      };
    };

    # Reverse-proxy Gonic under the configured virtual host, if any.
    services.nginx = mkIf (cfg.virtualHost != null) {
      enable = true;
      recommendedProxySettings = true;
      virtualHosts.${cfg.virtualHost} = {
        locations.${cfg.proxyPrefix}.proxyPass = "http://${cfg.listenAddress}";
      };
    };

    # Dedicated system user for the service, running in the "music" group.
    users.users.gonic = {
      description = "Gonic service user";
      group = "music";
      name = "gonic";
      home = cfg.home;
      createHome = true;
      isSystemUser = true;
    };
    users.groups.music = { };
  };
}

D modules/services/grafana.nix => modules/services/grafana.nix +0 -15
@@ 1,15 0,0 @@
{ config, lib, options, pkgs, ... }: with lib;
let
  cfg = config.services.grafana;
  # Grafana lives at grafana.<host>.<domain>.
  serverName = "grafana.${config.networking.hostName}.${config.networking.domain}";
in
mkIf cfg.enable {
  services.grafana.domain = serverName;

  # TLS-terminating reverse proxy in front of the Grafana web UI, with
  # websocket connections proxied through as well.
  services.nginx.virtualHosts.${cfg.domain} = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString cfg.port}";
      proxyWebsockets = true;
    };
  };
}

D modules/services/gui/default.nix => modules/services/gui/default.nix +0 -28
@@ 1,28 0,0 @@
{ config, lib, pkgs, ... }:
let
  # Shared GUI plumbing applies whether the X11 or the Wayland stack is on.
  guiEnabled = config.xorg.enable || config.wayland.enable;
in
{
  imports = [
    ./fonts.nix
    ./i3wm.nix
    ./sway.nix
  ];

  config = lib.mkIf guiEnabled {
    # GNOME odds and ends that desktop applications expect.
    programs.dconf.enable = true;
    services.dbus.packages = [ pkgs.dconf pkgs.gcr ];
    services.gnome.at-spi2-core.enable = true;
    services.gnome.gnome-keyring.enable = true;

    # Printing via CUPS.
    services.printing.enable = true;

    # Thumbnailing service.
    services.tumbler.enable = true;

    # Use geoclue2 as the location provider for things like redshift/gammastep.
    location.provider = "geoclue2";
    services.geoclue2.appConfig.redshift = {
      isAllowed = true;
      isSystem = true;
    };
  };
}

D modules/services/gui/fonts.nix => modules/services/gui/fonts.nix +0 -22
@@ 1,22 0,0 @@
{ config, lib, pkgs, ... }:
let
  guiEnabled = config.xorg.enable || config.wayland.enable;
in
{
  # Fonts are only installed when one of the GUI stacks is in use.
  fonts = lib.mkIf guiEnabled {
    fonts = [
      pkgs.font-awesome_4
      pkgs.iosevka
      pkgs.noto-fonts
      pkgs.noto-fonts-emoji
      pkgs.open-sans
      pkgs.powerline-fonts
      pkgs.terminus-nerdfont
    ];

    fontconfig = {
      enable = true;
      # Default families; Font Awesome is the monospace fallback so icon
      # glyphs render in terminals and bars.
      defaultFonts = {
        monospace = [ "Iosevka" "Font Awesome" ];
        sansSerif = [ "Open Sans" ];
        serif = [ "Noto Serif" ];
      };
    };
  };
}

D modules/services/gui/i3wm.nix => modules/services/gui/i3wm.nix +0 -38
@@ 1,38 0,0 @@
{ config, lib, pkgs, ... }: with lib;
let
  cfg = config.xorg;
in
{
  # Options for the X11/i3 stack.
  options.xorg = {
    enable = mkOption {
      type = types.bool;
      description = "Enable the Xorg stack";
      default = false;
    };

    xkbVariant = mkOption {
      type = types.str;
      description = "The XKB variant to use";
      default = "";
    };
  };

  config = mkIf cfg.enable {
    # X11 with i3 as the window manager.
    services.xserver.enable = true;
    services.xserver.windowManager.i3.enable = true;

    # US layout; only set a keyboard variant when one was requested.
    services.xserver.layout = "us";
    services.xserver.xkbVariant = mkIf (cfg.xkbVariant != "") cfg.xkbVariant;

    # Touchpad support via libinput, with tap-to-click disabled.
    services.xserver.libinput = {
      enable = true;
      touchpad.tapping = false;
    };
  };
}

D modules/services/gui/sway.nix => modules/services/gui/sway.nix +0 -24
@@ 1,24 0,0 @@
# Wayland/sway desktop stack, using the nixpkgs-wayland overlay.
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.wayland;

  rev = "master"; # 'rev' could be a git rev, to pin the overlay.
  # NOTE(review): fetching "master" is unpinned — each evaluation may pull a
  # different overlay snapshot; consider pinning 'rev' to a commit hash.
  url = "https://github.com/nix-community/nixpkgs-wayland/archive/${rev}.tar.gz";
  waylandOverlay = (import "${builtins.fetchTarball url}/overlay.nix");
in
{
  options.wayland = {
    enable = mkEnableOption "the wayland stack";
  };

  config = mkIf cfg.enable {
    # Apply the nixpkgs-wayland overlay.
    nixpkgs.overlays = [ waylandOverlay ];

    # Desktop portals (wlroots + GTK backends).
    xdg.portal = {
      enable = true;
      extraPortals = with pkgs; [ xdg-desktop-portal-wlr xdg-desktop-portal-gtk ];
      gtkUsePortal = true;
    };

    programs.sway.enable = true;
  };
}

D modules/services/healthcheck.nix => modules/services/healthcheck.nix +0 -60
@@ 1,60 0,0 @@
# Periodically ping healthchecks.io so an external service notices when this
# machine goes down; the ping reports failure when any monitored disk is
# above the usage threshold.
{ config, lib, pkgs, ... }: with lib;
let
  healthcheckCfg = config.services.healthcheck;
  # Used-space percentage above which a disk check fails.
  threshold = 97;

  # curl invocation for the healthchecks.io ping endpoint; `fail` selects
  # the /fail variant, which marks the check as failing.
  healthcheckCurl = fail: ''
    ${pkgs.curl}/bin/curl \
      --verbose \
      -fsS \
      --retry 2 \
      --max-time 5 \
      --ipv4 \
      https://hc-ping.com/${healthcheckCfg.checkId}${optionalString fail "/fail"}
  '';

  # Per-disk script: parse the used-space percentage out of `df` output and
  # ping the /fail endpoint (then exit non-zero) when it exceeds threshold.
  diskCheckScript = with pkgs; disk: writeShellScriptBin "diskcheck" ''
    set -xe
    CURRENT=$(${coreutils}/bin/df ${disk} | ${gnugrep}/bin/grep ${disk} | ${gawk}/bin/awk '{ print $5}' | ${gnused}/bin/sed 's/%//g')

    if [ "$CURRENT" -gt "${toString threshold}" ] ; then
      echo "Used space on ${disk} is over ${toString threshold}%"
      ${healthcheckCurl true}
      exit 1
    fi
  '';

  # Top-level script: run every disk check (set -e aborts after a failing
  # one), then send the success ping.
  healthcheckScript = pkgs.writeShellScriptBin "healthcheck" ''
    set -xe

    ${concatMapStringsSep "\n" (disk: "${diskCheckScript disk}/bin/diskcheck") healthcheckCfg.disks}

    # Everything worked, so success.
    ${healthcheckCurl false}
  '';
in
{
  options.services.healthcheck = {
    enable = mkEnableOption "the healthcheck ping service.";
    checkId = mkOption {
      type = types.str;
      description = "The healthchecks.io check ID.";
    };
    disks = mkOption {
      type = with types; listOf str;
      default = [ ];
      description = "List of paths to disks to check for usage thresholds";
    };
  };

  config = mkIf healthcheckCfg.enable {
    systemd.services.healthcheck = {
      description = "Healthcheck service";
      startAt = "*:*:0/30"; # Send a healthcheck ping every 30 seconds.
      serviceConfig = {
        ExecStart = "${healthcheckScript}/bin/healthcheck";
        TimeoutSec = 10;
      };
    };
  };
}

D modules/services/isso.nix => modules/services/isso.nix +0 -60
@@ 1,60 0,0 @@
# Isso (self-hosted commenting service) configuration for sumnerevans.com,
# with SMTP moderation notifications, an nginx reverse proxy, and backups.
{ config, lib, pkgs, ... }: with lib;
let
  issoCfg = config.services.isso;
in
{
  config = mkIf issoCfg.enable {
    services.isso.settings = {
      general = {
        host = "https://sumnerevans.com";
        notify = "smtp";
        reply-notifications = true;
        gravatar = true;
      };
      # Hold new comments for moderation; unapproved ones are purged after
      # 30 days.
      moderation = {
        enabled = true;
        purge-after = "30d";
      };
      server = {
        listen = "http://127.0.0.1:8888/";
      };
      # Outgoing mail for notifications. The password is read from an
      # untracked secrets file at evaluation time.
      smtp = {
        username = "comments@sumnerevans.com";
        password = lib.removeSuffix "\n" (builtins.readFile ../../secrets/isso-comments-smtp-password);
        host = "smtp.migadu.com";
        port = 587;
        security = "starttls";
        to = "admin@sumnerevans.com";
        from = "comments@sumnerevans.com";
      };
      # Basic rate limiting / spam guard.
      guard = {
        enabled = true;
        ratelimit = 2;
        direct-reply = 3;
        reply-to-self = false;
      };
      # Markdown features plus a small HTML allowlist (inline images only).
      markup = {
        options = "tables, fenced-code, footnotes, autolink, strikethrough, underline, math, math-explicit";
        allowed-elements = "img";
        allowed-attributes = "src";
      };
      admin = {
        enabled = true;
        password = lib.removeSuffix "\n" (builtins.readFile ../../secrets/isso-admin-password);
      };
    };

    # Set up nginx to forward requests properly.
    services.nginx.virtualHosts = {
      "comments.sumnerevans.com" = {
        enableACME = true;
        forceSSL = true;

        locations."/".proxyPass = "http://127.0.0.1:8888";
      };
    };

    # Add a backup service.
    services.backup.backups.isso.path = "/var/lib/isso";
  };
}

D modules/services/journald.nix => modules/services/journald.nix +0 -6
@@ 1,6 0,0 @@
# Cap journald's disk usage so logs cannot fill the disk.
#
# The original bound `config` and `lib` (and opened `with lib;`) without
# using either; `{ ... }:` accepts the same module arguments without the
# dead bindings.
{ ... }:
{
  services.journald.extraConfig = ''
    SystemMaxUse=2G
  '';
}

D modules/services/longview.nix => modules/services/longview.nix +0 -10
@@ 1,10 0,0 @@
{ config, lib, ... }:
let
  cfg = config.services.longview;
  hostnameDomain = "${config.networking.hostName}.${config.networking.domain}";
in
# Point Linode Longview's nginx monitoring at this host's /status URL.
lib.mkIf cfg.enable {
  services.longview.nginxStatusUrl = "https://${hostnameDomain}/status";
}

D modules/services/matrix/coturn.nix => modules/services/matrix/coturn.nix +0 -85
@@ 1,85 0,0 @@
# See: https://nixos.org/nixos/manual/index.html#module-services-matrix-synapse
#
# Coturn TURN server for Matrix VoIP, plus the firewall, ACME, and Synapse
# settings that go with it.
{ config, lib, pkgs, ... }:
let
  turnDomain = "turn.${config.networking.domain}";
  certs = config.security.acme.certs;
  # Shared secret read from an untracked secrets file at evaluation time.
  staticAuthSecret = lib.removeSuffix "\n" (builtins.readFile ../../../secrets/coturn-static-auth-secret);
in
# TODO actually figure this out eventually
# TODO will need to convert to use matrix-synapse-custom
# NOTE(review): the `false &&` below disables this entire module
# unconditionally (see the TODOs above).
lib.mkIf (false && config.services.matrix-synapse.enable) {
  services.coturn = rec {
    enable = true;
    no-cli = true;
    no-tcp-relay = true;
    min-port = 49000;
    max-port = 50000;
    use-auth-secret = true;
    static-auth-secret = staticAuthSecret;
    realm = turnDomain;
    # TLS certificate/key come from the ACME cert issued below.
    cert = "${certs.${turnDomain}.directory}/full.pem";
    pkey = "${certs.${turnDomain}.directory}/key.pem";
    extraConfig = ''
      # for debugging
      verbose
      # ban private IP ranges
      no-multicast-peers
      denied-peer-ip=0.0.0.0-0.255.255.255
      denied-peer-ip=10.0.0.0-10.255.255.255
      denied-peer-ip=100.64.0.0-100.127.255.255
      denied-peer-ip=127.0.0.0-127.255.255.255
      denied-peer-ip=169.254.0.0-169.254.255.255
      denied-peer-ip=172.16.0.0-172.31.255.255
      denied-peer-ip=192.0.0.0-192.0.0.255
      denied-peer-ip=192.0.2.0-192.0.2.255
      denied-peer-ip=192.88.99.0-192.88.99.255
      denied-peer-ip=192.168.0.0-192.168.255.255
      denied-peer-ip=198.18.0.0-198.19.255.255
      denied-peer-ip=198.51.100.0-198.51.100.255
      denied-peer-ip=203.0.113.0-203.0.113.255
      denied-peer-ip=240.0.0.0-255.255.255.255
      denied-peer-ip=::1
      denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
      denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
      denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
      denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    '';
  };

  # open the firewall
  networking.firewall = {
    interfaces.enp2s0 =
      let
        # The relay port range configured on coturn above.
        ranges = with config.services.coturn; [
          { from = min-port; to = max-port; }
        ];
      in
        {
          allowedUDPPortRanges = ranges;
          allowedUDPPorts = [ 3478 ];
          allowedTCPPortRanges = ranges;
          allowedTCPPorts = [ 3478 ];
        };
  };

  # get a certificate
  services.nginx.virtualHosts.${turnDomain}.enableACME = true;
  security.acme.certs.${turnDomain} = {
    group = "turnserver";
    postRun = "systemctl restart coturn.service";
  };
  # Let both coturn and nginx read the cert files.
  users.groups.turnserver.members = [ "turnserver" "nginx" ];

  # configure synapse to point users to coturn
  services.matrix-synapse = with config.services.coturn; {
    turn_uris = [
      "turn:${turnDomain}:3478?transport=udp"
      "turn:${turnDomain}:3478?transport=tcp"
    ];
    turn_shared_secret = staticAuthSecret;
    turn_user_lifetime = "1h";
  };
}

D modules/services/matrix/default.nix => modules/services/matrix/default.nix +0 -12
@@ 1,12 0,0 @@
# Aggregator module for all Matrix-related services (Synapse plus the
# bridges and bots).
{
  imports = [
    ./coturn.nix
    # ./heisenbridge.nix
    ./linkedin-matrix.nix
    ./matrix-vacation-responder.nix
    ./mjolnir.nix
    ./quotesfilebot.nix
    ./standupbot.nix
    ./synapse
  ];
}

D modules/services/matrix/heisenbridge.nix => modules/services/matrix/heisenbridge.nix +0 -125
@@ 1,125 0,0 @@
# Heisenbridge: a bouncer-style Matrix IRC bridge, registered with Synapse
# as an appservice.
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.services.heisenbridge;
  # Packaged locally rather than taken from nixpkgs.
  heisenbridge = pkgs.callPackage ../../../pkgs/heisenbridge.nix { };

  # Appservice registration handed to the homeserver; the bridge exclusively
  # claims all @irc_* user IDs.
  heisenbridgeAppserviceConfig = {
    id = "heisenbridge";
    url = "http://${cfg.listenAddress}:${toString cfg.listenPort}";
    as_token = cfg.appServiceToken;
    hs_token = cfg.homeserverToken;
    rate_limited = false;
    sender_localpart = cfg.senderLocalpart;
    namespaces = {
      users = [{ regex = "@irc_.*"; exclusive = true; }];
      aliases = [ ];
      rooms = [ ];
    };
  };

  yamlFormat = pkgs.formats.yaml { };
  heisenbridgeConfigYaml = yamlFormat.generate "heisenbridge.yaml" heisenbridgeAppserviceConfig;
in
{
  options = {
    services.heisenbridge = {
      enable = mkEnableOption "heisenbridge, a bouncer-style Matrix IRC bridge.";
      identd.enable = mkEnableOption "identd for heisenbridge" // {
        default = true;
      };
      useLocalSynapse = mkOption {
        type = types.bool;
        default = true;
        description = "Whether or not to use the local synapse instance.";
      };
      homeserver = mkOption {
        type = types.str;
        default = "http://localhost:8008";
        description = "The URL of the Matrix homeserver.";
      };
      listenAddress = mkOption {
        type = types.str;
        default = "127.0.0.1";
        description = "The address for heisenbridge to listen on.";
      };
      listenPort = mkOption {
        type = types.int;
        default = 9898;
        description = "The port for heisenbridge to listen on.";
      };
      senderLocalpart = mkOption {
        type = types.str;
        default = "heisenbridge";
        description = "The localpart of the heisenbridge admin bot's username.";
      };
      ownerId = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = ''
          The owner MXID (for example, @user:homeserver) of the bridge. If
          unspecified, the first talking local user will claim the bridge.
        '';
      };
      appServiceToken = mkOption {
        type = types.str;
        description = ''
          This is the token that the app service should use as its access_token
          when using the Client-Server API. This can be anything you want.
        '';
      };
      homeserverToken = mkOption {
        type = types.str;
        description = ''
          This is the token that the homeserver will use when sending requests
          to the app service. This can be anything you want.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    meta.maintainers = [ maintainers.sumnerevans ];

    assertions = [{
      assertion = cfg.useLocalSynapse -> config.services.matrix-synapse-custom.enable;
      message = ''
        Heisenbridge must be running on the same server as Synapse if
        'useLocalSynapse' is enabled.
      '';
    }];

    # Register the bridge with the local Synapse instance when applicable.
    services.matrix-synapse-custom.appServiceConfigFiles = mkIf cfg.useLocalSynapse [
      heisenbridgeConfigYaml
    ];

    # Create a user for heisenbridge.
    users.users.heisenbridge = {
      group = "heisenbridge";
      isSystemUser = true;
    };
    users.groups.heisenbridge = { };

    # Open ports for identd.
    networking.firewall.allowedTCPPorts = mkIf cfg.identd.enable [ 113 ];

    systemd.services.heisenbridge = {
      description = "Heisenbridge Matrix IRC bridge";
      # Wait for the local Synapse only when we actually talk to it.
      after = optional cfg.useLocalSynapse "matrix-synapse.target";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = ''
          ${heisenbridge}/bin/heisenbridge \
            --config ${heisenbridgeConfigYaml} \
            --verbose --verbose \
            --listen-address ${cfg.listenAddress} \
            --listen-port ${toString cfg.listenPort} \
            --uid heisenbridge \
            --gid heisenbridge \
            ${optionalString cfg.identd.enable "--identd"} \
            ${optionalString (cfg.ownerId != null) "--owner ${cfg.ownerId}"} \
            ${cfg.homeserver}
        '';
        Restart = "on-failure";
      };
    };
  };
}

D modules/services/matrix/linkedin-matrix.nix => modules/services/matrix/linkedin-matrix.nix +0 -237
@@ 1,237 0,0 @@
# linkedin-matrix: LinkedIn Messaging <-> Matrix bridge (mautrix-style),
# registered with Synapse as an appservice, backed by PostgreSQL, and
# scraped by Prometheus.
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.services.linkedin-matrix;
  synapseCfg = config.services.matrix-synapse-custom;

  # Packaged locally rather than taken from nixpkgs.
  linkedin-matrix = pkgs.callPackage ../../../pkgs/linkedin-matrix.nix { };

  # Appservice registration handed to the homeserver; the bridge exclusively
  # claims the @li_* puppet users and the bot account.
  linkedinMatrixAppserviceConfig = {
    id = "linkedin";
    url = "http://${cfg.listenAddress}:${toString cfg.listenPort}";
    as_token = cfg.appServiceToken;
    hs_token = cfg.homeserverToken;
    rate_limited = false;
    # Opaque fixed localpart for the appservice sender.
    sender_localpart = "XDUsekmAmWcmL1FWrgZ8E7ih-p0vffI3kMiezV43Sw29GLBQAQ-0_GRJXMQXlVb0";
    namespaces = {
      users = [
        { regex = "@li_.*:nevarro.space"; exclusive = true; }
        { regex = "@linkedinbot:nevarro.space"; exclusive = true; }
      ];
      aliases = [ ];
      rooms = [ ];
    };
  };

  yamlFormat = pkgs.formats.yaml { };

  linkedinMatrixAppserviceConfigYaml = yamlFormat.generate "linkedin-matrix-registration.yaml" linkedinMatrixAppserviceConfig;

  # The bridge's own config file (homeserver connection, appservice
  # listener, bridge behavior, and logging).
  linkedinMatrixConfig = {
    homeserver = {
      address = cfg.homeserver;
      domain = config.networking.domain;
      verify_ssl = false;
      asmux = false;
      http_retry_count = 4;
    };

    # Prometheus metrics endpoint (scraped below).
    metrics = {
      enabled = true;
      listen_port = 9010;
    };

    appservice = {
      address = "http://${cfg.listenAddress}:${toString cfg.listenPort}";
      hostname = cfg.listenAddress;
      port = cfg.listenPort;
      max_body_size = 1;
      database = "postgresql://linkedinmatrix:linkedinmatrix@localhost/linkedin-matrix";
      database_opts = { min_size = 5; max_size = 10; };
      id = "linkedin";
      bot_username = cfg.botUsername;
      bot_displayname = "LinkedIn bridge bot";
      bot_avatar = "mxc://sumnerevans.com/XMtwdeUBnxYvWNFFrfeTSHqB";
      as_token = cfg.appServiceToken;
      hs_token = cfg.homeserverToken;
      ephemeral_events = true;

      provisioning = {
        enabled = true;
        prefix = "/provision";
        shared_secret = "supersecrettoken"; # provisioning API is not public
      };
    };

    bridge = {
      username_template = "li_{userid}";
      displayname_template = "{displayname}";
      displayname_preference = [ "name" "first_name" ];
      set_topic_on_dms = true;
      command_prefix = "!li";
      initial_chat_sync = 20;
      invite_own_puppet_to_pm = true;
      sync_with_custom_puppets = false;
      sync_direct_chat_list = true;
      presence = false;
      update_avatar_initial_sync = true;
      # The Synapse shared-secret-auth secret is read from a file at
      # evaluation time.
      login_shared_secret_map = {
        "nevarro.space" = removeSuffix "\n" (readFile synapseCfg.sharedSecretAuthFile);
      };
      encryption = {
        allow = true;
        default = true;
        key_sharing = {
          allow = true;
          require_cross_signing = false;
          require_verification = true;
        };
      };
      delivery_receipts = true;
      backfill = {
        invite_own_puppet = true;
        initial_limit = 20;
        missed_limit = 20;
        disable_notifications = true;
      };
      temporary_disconnect_notices = true;
      mute_bridging = true;
      permissions = {
        "nevarro.space" = "user";
        "@sumner:sumnerevans.com" = "admin";
        "@sumner:nevarro.space" = "admin";
      };
    };

    # Log everything to the systemd journal at DEBUG level.
    logging = {
      version = 1;

      formatters.journal_fmt.format = "[%(name)s] %(message)s";
      handlers = {
        journal = {
          class = "systemd.journal.JournalHandler";
          formatter = "journal_fmt";
          SYSLOG_IDENTIFIER = "linkedin-matrix";
        };
      };
      loggers = {
        aiohttp.level = "DEBUG";
        mau.level = "DEBUG";
        paho.level = "DEBUG";
        root.level = "DEBUG";
      };
      root = { level = "DEBUG"; handlers = [ "journal" ]; };
    };
  };

  linkedinMatrixConfigYaml = yamlFormat.generate "linkedin-config.yaml" linkedinMatrixConfig;
in
{
  options = {
    services.linkedin-matrix = {
      enable = mkEnableOption "linkedin-matrix, a LinkedIn Messaging <-> Matrix bridge.";
      useLocalSynapse = mkOption {
        type = types.bool;
        default = true;
        description = "Whether or not to use the local synapse instance.";
      };
      homeserver = mkOption {
        type = types.str;
        default = "http://localhost:8008";
        description = "The URL of the Matrix homeserver.";
      };
      listenAddress = mkOption {
        type = types.str;
        default = "127.0.0.1";
        description = "The address for linkedin-matrix to listen on.";
      };
      listenPort = mkOption {
        type = types.int;
        default = 9899;
        description = "The port for linkedin-matrix to listen on.";
      };
      botUsername = mkOption {
        type = types.str;
        default = "linkedinbot";
        description = "The localpart of the linkedin-matrix admin bot's username.";
      };
      appServiceToken = mkOption {
        type = types.str;
        description = ''
          This is the token that the app service should use as its access_token
          when using the Client-Server API. This can be anything you want.
        '';
      };
      homeserverToken = mkOption {
        type = types.str;
        description = ''
          This is the token that the homeserver will use when sending requests
          to the app service. This can be anything you want.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    meta.maintainers = [ maintainers.sumnerevans ];

    assertions = [
      {
        assertion = cfg.useLocalSynapse -> config.services.matrix-synapse-custom.enable;
        message = ''
          LinkedIn must be running on the same server as Synapse if
          'useLocalSynapse' is enabled.
        '';
      }
    ];

    # Register the bridge with the local Synapse instance when applicable.
    services.matrix-synapse-custom.appServiceConfigFiles = mkIf cfg.useLocalSynapse [
      linkedinMatrixAppserviceConfigYaml
    ];

    # Create a user for linkedin-matrix.
    users.users.linkedinmatrix = {
      group = "linkedinmatrix";
      isSystemUser = true;
    };
    users.groups.linkedinmatrix = { };

    # Create a database user for linkedin-matrix
    services.postgresql.ensureDatabases = [ "linkedin-matrix" ];
    services.postgresql.ensureUsers = [
      {
        name = "linkedinmatrix";
        ensurePermissions = {
          "DATABASE \"linkedin-matrix\"" = "ALL PRIVILEGES";
          "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
        };
      }
    ];

    systemd.services.linkedin-matrix = {
      description = "LinkedIn Messaging <-> Matrix Bridge";
      # Wait for the local Synapse only when we actually talk to it.
      after = optional cfg.useLocalSynapse "matrix-synapse.target";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        User = "linkedinmatrix";
        Group = "linkedinmatrix";
        ExecStart = ''
          ${linkedin-matrix}/bin/linkedin-matrix \
            --config ${linkedinMatrixConfigYaml} \
            --no-update
        '';
        Restart = "on-failure";
      };
    };

    # Scrape the bridge's metrics endpoint (configured above on port 9010).
    services.prometheus = {
      enable = true;
      scrapeConfigs = [
        {
          # NOTE(review): "linkedinmatirx" looks like a typo of
          # "linkedinmatrix"; fixing it would rename the Prometheus job
          # label, so it is flagged rather than silently changed.
          job_name = "linkedinmatirx";
          scrape_interval = "15s";
          metrics_path = "/";
          static_configs = [{ targets = [ "0.0.0.0:9010" ]; }];
        }
      ];
    };
  };
}

D modules/services/matrix/matrix-vacation-responder.nix => modules/services/matrix/matrix-vacation-responder.nix +0 -68
@@ 1,68 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.services.matrix-vacation-responder;
  matrix-vacation-responder = pkgs.callPackage ../../../pkgs/matrix-vacation-responder { };

  # Responder configuration, rendered to YAML below.
  vacationResponderConfig = {
    homeserver = cfg.homeserver;
    username = cfg.username;
    password_file = cfg.passwordFile;

    vacation_message = ''
      This is no longer my primary Matrix account.
      Please send your messages to [@sumner:nevarro.space](https://matrix.to/#/@sumner:nevarro.space)
    '';
    # NOTE(review): 1440 is presumably minutes (one day) between repeated
    # replies — confirm against the matrix-vacation-responder docs.
    vacation_message_min_interval = 1440;
    respond_to_groups = true;
  };
  format = pkgs.formats.yaml { };
  matrixVacationResponderConfigYaml = format.generate "matrix-vacation-responder.config.yaml" vacationResponderConfig;
in
{
  options = {
    services.matrix-vacation-responder = {
      enable = mkEnableOption "matrix-vacation-responder";
      username = mkOption {
        type = types.str;
        description = "Matrix user ID the vacation responder logs in as.";
      };
      homeserver = mkOption {
        type = types.str;
        default = "http://localhost:8008";
        description = "URL of the homeserver's client-server API.";
      };
      passwordFile = mkOption {
        type = types.path;
        default = "/etc/nixos/secrets/matrix/bots/vacation-responder-password";
        description = "Path to a file containing the responder account's password.";
      };
      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/matrixvacationresponder";
        description = "Directory where the responder stores its database file.";
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.services.matrix-vacation-responder = {
      description = "Matrix Vacation Responder";
      after = [ "matrix-synapse.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = ''
          ${matrix-vacation-responder}/bin/matrix-vacation-responder \
            --config ${matrixVacationResponderConfigYaml} \
            --dbfile ${cfg.dataDir}/matrix-vacation-responder.db
        '';
        Restart = "on-failure";
        User = "matrixvacationresponder";
      };
    };

    # Dedicated system user whose home is the data directory so the database
    # file passed via --dbfile is writable by the service.
    users = {
      users.matrixvacationresponder = {
        group = "matrixvacationresponder";
        isSystemUser = true;
        home = cfg.dataDir;
        createHome = true;
      };
      groups.matrixvacationresponder = { };
    };
  };
}

D modules/services/matrix/mjolnir.nix => modules/services/matrix/mjolnir.nix +0 -24
@@ 1,24 0,0 @@
{ config, lib, pkgs, ... }: with lib;
let
  mjolnirCfg = config.services.mjolnir;
  # Port shared between mjolnir's pantalaimon client settings and the
  # pantalaimon-headless instance definition below.
  pantalaimonPort = 8100;
in
{
  services.mjolnir = {
    homeserverUrl = "https://matrix.nevarro.space";
    managementRoom = "#mjolnir:nevarro.space";

    # Route mjolnir's client traffic through a local pantalaimon proxy,
    # logging in with the "marshal" account.
    pantalaimon = {
      enable = true;
      username = "marshal";
      passwordFile = "/etc/nixos/secrets/matrix/bots/marshal";
      options.listenPort = pantalaimonPort;
    };

    settings.protectAllJoinedRooms = true;
  };

  # Only define the pantalaimon instance when mjolnir itself is enabled.
  services.pantalaimon-headless.instances = mkIf mjolnirCfg.enable {
    mjolnir.listenPort = pantalaimonPort;
  };
}

D modules/services/matrix/quotesfilebot.nix => modules/services/matrix/quotesfilebot.nix +0 -70
@@ 1,70 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.services.quotesfilebot;
  quotesfilebot = pkgs.callPackage ../../../pkgs/quotesfilebot {};

  # Bot configuration, rendered to JSON below.
  quotesfilebotConfig = {
    DefaultReactionEmoji = cfg.defaultReactionEmoji;
    Username = cfg.username;
    Homeserver = cfg.homeserver;
    PasswordFile = cfg.passwordFile;
    JoinMessage = cfg.joinMessage;
  };
  format = pkgs.formats.json {};
  quotesfilebotConfigJson = format.generate "quotesfilebot.json" quotesfilebotConfig;
in
{
  options = {
    services.quotesfilebot = {
      enable = mkEnableOption "quotesfilebot";
      defaultReactionEmoji = mkOption {
        type = types.str;
        default = "💬";
        description = "Default emoji reaction used by the bot.";
      };
      username = mkOption {
        type = types.str;
        default = "@quotesfilebot:${config.networking.domain}";
        description = "Matrix user ID the bot logs in as.";
      };
      homeserver = mkOption {
        type = types.str;
        default = "http://localhost:8008";
        description = "URL of the homeserver's client-server API.";
      };
      passwordFile = mkOption {
        type = types.path;
        default = "/var/lib/quotesfilebot/passwordfile";
        description = "Path to a file containing the bot account's password.";
      };
      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/quotesfilebot";
        description = "Home directory of the quotesfilebot system user.";
      };
      joinMessage = mkOption {
        type = types.str;
        default = "I'm a quotesfilebot!";
        description = "Message configured for the bot's JoinMessage setting.";
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.services.quotesfilebot = {
      description = "Quotesfilebot";
      after = [ "matrix-synapse.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = ''
          ${quotesfilebot}/bin/quotes-file-bot --config ${quotesfilebotConfigJson}
        '';
        Restart = "on-failure";
        User = "quotesfilebot";
      };
    };

    # Dedicated system user; its home is the data directory so the bot's
    # state (including the default password file location) lives there.
    users = {
      users.quotesfilebot = {
        group = "quotesfilebot";
        isSystemUser = true;
        home = cfg.dataDir;
        createHome = true;
      };
      groups.quotesfilebot = {};
    };
  };
}

D modules/services/matrix/standupbot.nix => modules/services/matrix/standupbot.nix +0 -60
@@ 1,60 0,0 @@
{ config, lib, pkgs, ... }: with lib; let
  cfg = config.services.standupbot;
  standupbot = pkgs.callPackage ../../../pkgs/standupbot {};

  # Bot configuration, rendered to JSON below.
  standupbotConfig = {
    Username = cfg.username;
    Homeserver = cfg.homeserver;
    PasswordFile = cfg.passwordFile;
  };
  format = pkgs.formats.json {};
  standupbotConfigJson = format.generate "standupbot.config.json" standupbotConfig;
in
{
  options = {
    services.standupbot = {
      enable = mkEnableOption "standupbot";
      username = mkOption {
        type = types.str;
        default = "@standupbot:${config.networking.domain}";
        description = "Matrix user ID the bot logs in as.";
      };
      homeserver = mkOption {
        type = types.str;
        default = "http://localhost:8008";
        description = "URL of the homeserver's client-server API.";
      };
      passwordFile = mkOption {
        type = types.path;
        default = "/var/lib/standupbot/passwordfile";
        description = "Path to a file containing the bot account's password.";
      };
      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/standupbot";
        description = "Home directory of the standupbot system user.";
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.services.standupbot = {
      description = "Standupbot";
      after = [ "matrix-synapse.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = ''
          ${standupbot}/bin/standupbot --config ${standupbotConfigJson}
        '';
        Restart = "on-failure";
        User = "standupbot";
      };
    };

    # Dedicated system user; its home is the data directory so the bot's
    # state (including the default password file location) lives there.
    users = {
      users.standupbot = {
        group = "standupbot";
        isSystemUser = true;
        home = cfg.dataDir;
        createHome = true;
      };
      groups.standupbot = {};
    };
  };
}

D modules/services/matrix/synapse/cleanup-synapse.nix => modules/services/matrix/synapse/cleanup-synapse.nix +0 -169
@@ 1,169 0,0 @@
# See:
# - https://levans.fr/shrink-synapse-database.html
# - https://foss-notes.blog.nomagic.uk/2021/03/matrix-database-house-cleaning/
# - https://git.envs.net/envs/matrix-conf/src/branch/master/usr/local/bin
{ config, lib, pkgs, ... }:
with pkgs;
with lib;
let
  cfg = config.services.cleanup-synapse;
  synapseCfg = config.services.matrix-synapse-custom;

  adminUrl = "http://localhost:8008/_synapse/admin/v1";
  adminMediaRepoUrl = "http://localhost:8011/_synapse/admin/v1";
  adminCurl = ''${curl}/bin/curl --header "Authorization: Bearer $CLEANUP_ACCESS_TOKEN" '';

  # Delete cached remote media older than 90 days via the Synapse admin API
  # on the media-repo worker.
  purgeRemoteMedia = writeShellScriptBin "purge-remote-media" ''
    set -xe

    # Current time in ms: date gives ns, keep the first 13 digits.
    now=$(${coreutils}/bin/date +%s%N | ${coreutils}/bin/cut -b1-13)
    # 7776000000 ms = 90 days. (Fixed misspelling: was "nintey_days_ago".)
    ninety_days_ago=$(( now - 7776000000 ))

    ${adminCurl} \
      -X POST \
      -H "Content-Type: application/json" \
      -d "{}" \
      "${adminMediaRepoUrl}/purge_media_cache?before_ts=$ninety_days_ago"
  '';

  # Get rid of any rooms that aren't joined by anyone from the homeserver.
  # NOTE(review): the admin API query is capped at limit=1000, so rooms beyond
  # the first 1000 returned are not examined in a single run.
  cleanupForgottenRooms = writeShellScriptBin "cleanup-forgotten" ''
    set -xe

    roomlist=$(mktemp)
    to_purge=$(mktemp)

    ${adminCurl} '${adminUrl}/rooms?limit=1000' > $roomlist

    # Find all of the rooms that have no local users.
    ${jq}/bin/jq -r '.rooms[] | select(.joined_local_members == 0) | .room_id' < $roomlist > $to_purge

    while read room_id; do 
      echo "deleting $room_id..."
      ${adminCurl} \
        -X DELETE \
        -H "Content-Type: application/json" \
        -d "{}" \
        "${adminUrl}/rooms/$room_id"
    done < $to_purge
  '';

  # # Delete all non-local room history that is from before 90 days ago.
  # cleanupHistory = writeShellScriptBin "cleanup-history" ''
  #   set -xe
  #   roomlist=$(mktemp)

  #   ${adminCurl} '${adminUrl}/rooms?limit=1000' |
  #     ${jq}/bin/jq -r '.rooms[] | .room_id' > $roomlist

  #   now=$(${coreutils}/bin/date +%s%N | ${coreutils}/bin/cut -b1-13)
  #   nintey_days_ago=$(( now - 7776000000 ))

  #   while read room_id; do 
  #     echo "purging history for $room_id..."

  #     ${adminCurl} -X POST -H "Content-Type: application/json" \
  #       -d "{ \"delete_local_events\": false, \"purge_up_to_ts\": $nintey_days_ago }" \
  #       "${adminUrl}/purge_history/$room_id"
  #   done < $roomlist
  # '';

  # Room IDs grouped by how many state-group rows each owns, ordered by that
  # count ascending (NOTE(review): ascending means the largest rooms come last).
  largeStateRoomsQuery = "SELECT room_id FROM state_groups GROUP BY room_id ORDER BY count(*)";
  # Run rust-synapse-compress-state over every room from the query above,
  # applying the generated SQL only when the compressor produced output, with
  # autovacuum on state_groups_state disabled for the duration and a
  # VACUUM FULL ANALYZE afterwards.
  compressState = writeShellScriptBin "compress-state" ''
    set -xe
    bigrooms=$(mktemp)
    echo "\\copy (${largeStateRoomsQuery}) to '$bigrooms' with CSV" |
      ${postgresql}/bin/psql -d matrix-synapse

    echo 'Disabling autovacuum on state_groups_state'
    echo 'ALTER TABLE state_groups_state SET (AUTOVACUUM_ENABLED = FALSE);' |
      /run/wrappers/bin/sudo -u postgres ${postgresql}/bin/psql -d matrix-synapse

    while read room_id; do
      echo "compressing state for $room_id"

      state_compressor=$(mktemp)

      ${matrix-synapse-tools.rust-synapse-compress-state}/bin/synapse-compress-state \
        -t \
        -o $state_compressor \
        -m 1000 \
        -p "host=localhost user=matrix-synapse password=synapse dbname=matrix-synapse" \
        -r $room_id

      if test -s "$state_compressor"
      then
        ${postgresql}/bin/psql -d matrix-synapse -c '\set ON_ERROR_STOP on' -f $state_compressor
      fi

      echo "done compressing state for $room_id"

      rm $state_compressor
    done <$bigrooms

    echo 'Enabling autovacuum on state_groups_state'
    echo 'ALTER TABLE state_groups_state SET (AUTOVACUUM_ENABLED = TRUE);' |
      /run/wrappers/bin/sudo -u postgres ${postgresql}/bin/psql -d matrix-synapse

    echo 'Running VACUUM and ANALYZE for state_groups_state ...'
    echo 'VACUUM FULL ANALYZE state_groups_state' |
      /run/wrappers/bin/sudo -u postgres ${postgresql}/bin/psql -d matrix-synapse

    rm $bigrooms
  '';

  # Reindex and VACUUM FULL the Synapse database; Synapse must be stopped for
  # the duration. (Fixed misspelling throughout: "vaccum" -> "vacuum".)
  reindexAndVacuum = writeShellScriptBin "reindex-and-vacuum" ''
    set -xe
    systemctl stop matrix-synapse.target

    echo 'REINDEX (VERBOSE) DATABASE "matrix-synapse"' |
      /run/wrappers/bin/sudo -u postgres ${postgresql}/bin/psql -d matrix-synapse

    echo "VACUUM FULL VERBOSE" |
      /run/wrappers/bin/sudo -u postgres ${postgresql}/bin/psql -d matrix-synapse

    systemctl start matrix-synapse.target
  '';

  # Run every cleanup step in sequence.
  cleanupSynapseScript = writeShellScriptBin "cleanup-synapse" ''
    set -xe
    ${purgeRemoteMedia}/bin/purge-remote-media
    ${cleanupForgottenRooms}/bin/cleanup-forgotten
    ${compressState}/bin/compress-state
    ${reindexAndVacuum}/bin/reindex-and-vacuum
  '';
in
{
  options.services.cleanup-synapse = {
    environmentFile = mkOption {
      type = types.path;
      description = "The environment file for the synapse cleanup script.";
    };
  };

  # There is no separate enable flag: the cleanup timer is installed whenever
  # the custom Synapse module is enabled on this host.
  config = mkIf synapseCfg.enable {
    systemd.services.cleanup-synapse = {
      description = "Cleanup synapse";
      startAt = "*-10"; # Cleanup everything on the 10th of each month.
      serviceConfig = {
        ExecStart = "${cleanupSynapseScript}/bin/cleanup-synapse";
        # Expected to provide CLEANUP_ACCESS_TOKEN, which the adminCurl
        # wrapper above sends as the bearer token.
        EnvironmentFile = cfg.environmentFile;
        PrivateTmp = true;
        ProtectSystem = true;
        ProtectHome = "read-only";
      };
    };

    # Allow root to manage matrix-synapse database.
    services.postgresql.ensureUsers = [
      {
        name = "root";
        ensurePermissions = {
          "DATABASE \"matrix-synapse\"" = "ALL PRIVILEGES";
          "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
        };
      }
    ];
  };
}

D modules/services/matrix/synapse/default.nix => modules/services/matrix/synapse/default.nix +0 -441
@@ 1,441 0,0 @@
# See: https://nixos.org/nixos/manual/index.html#module-services-matrix-synapse
{ config, lib, pkgs, ... }: with lib;
let
  matrixDomain = "matrix.${config.networking.domain}";
  cfg = config.services.matrix-synapse-custom;

  # Custom package that tracks with the latest release of Synapse.
  package = pkgs.matrix-synapse.overridePythonAttrs (
    old: rec {
      pname = "matrix-synapse";
      version = "1.54.0";

      src = pkgs.python3Packages.fetchPypi {
        inherit pname version;
        sha256 = "sha256-TmUu6KpL111mjd4Dgm/kYnKpDZjw9rWrpMQ5isXmWRo=";
      };

      doCheck = false;
    }
  );

  packageWithModules = package.python.withPackages (ps: [
    (package.python.pkgs.toPythonModule package)
    (pkgs.matrix-synapse-plugins.matrix-synapse-shared-secret-auth.overridePythonAttrs (old: rec {
      pname = "matrix-synapse-shared-secret-auth";
      version = "2.0.1";

      src = pkgs.fetchFromGitHub {
        owner = "devture";
        repo = "matrix-synapse-shared-secret-auth";
        rev = version;
        sha256 = "sha256-kaok5IwKx97FYDrVIGAtUJfExqDln5vxEKrZda2RdzE=";
      };
      buildInputs = [ pkgs.matrix-synapse ];
    }))
  ]);

  yamlFormat = pkgs.formats.yaml { };

  sharedConfig = (import ./shared-config.nix ({ inherit config lib pkgs; }));
  sharedConfigFile = yamlFormat.generate
    "matrix-synapse-config.yaml"
    sharedConfig;

  mkSynapseWorkerService = config: recursiveUpdate config {
    after = [ "matrix-synapse.service" ];
    partOf = [ "matrix-synapse.target" ];
    wantedBy = [ "matrix-synapse.target" ];
    serviceConfig = {
      Type = "notify";
      User = "matrix-synapse";
      Group = "matrix-synapse";
      WorkingDirectory = cfg.dataDir;
      ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
      Restart = "on-failure";
      UMask = "0077";
    };
  };

  mkSynapseWorkerConfig = port: config:
    let
      newConfig = {
        # The replication listener on the main synapse process.
        worker_replication_host = "127.0.0.1";
        worker_replication_http_port = 9093;

        # Default to generic worker.
        worker_app = "synapse.app.generic_worker";
      } // config;
      newWorkerListeners = (config.worker_listeners or [ ]) ++ [
        {
          type = "metrics";
          bind_address = "";
          port = port;
        }
      ];
    in
    newConfig // { worker_listeners = newWorkerListeners; };

  federationSender1ConfigFile = yamlFormat.generate
    "federation-sender-1.yaml"
    (mkSynapseWorkerConfig 9101 {
      worker_app = "synapse.app.federation_sender";
      worker_name = "federation_sender1";
    });

  federationReader1ConfigFile = yamlFormat.generate
    "federation-reader-1.yaml"
    (mkSynapseWorkerConfig 9102 {
      worker_name = "federation_reader1";
      worker_listeners = [
        # Federation
        {
          type = "http";
          port = 8009;
          bind_address = "0.0.0.0";
          tls = false;
          x_forwarded = true;
          resources = [
            { names = [ "federation" ]; compress = false; }
          ];
        }
      ];
    });

  eventPersister1ConfigFile = yamlFormat.generate
    "event-persister-1.yaml"
    (mkSynapseWorkerConfig 9103 {
      worker_name = "event_persister1";
      # The event persister needs a replication listener
      worker_listeners = [
        {
          type = "http";
          port = 9091;
          bind_address = "127.0.0.1";
          resources = [{ names = [ "replication" ]; }];
        }
      ];
    });

  # Sync worker: serves client traffic on port 8010 (proxied by the nginx
  # config below); metrics exposed on port 9104.
  synchotron1ConfigFile = yamlFormat.generate
    "synchotron-1.yaml"
    (mkSynapseWorkerConfig 9104 {
      worker_name = "synchotron1";
      # Client listener for this worker. (Comment fixed: the original said
      # "the event persister needs a replication listener", copy-pasted from
      # the block above.)
      worker_listeners = [
        {
          type = "http";
          port = 8010;
          bind_address = "0.0.0.0";
          resources = [{ names = [ "client" ]; }];
        }
      ];
    });

  # Media repository worker: serves media on port 8011 (proxied by the nginx
  # config below); metrics exposed on port 9105.
  mediaRepo1ConfigFile = yamlFormat.generate
    "media-repo-1.yaml"
    (mkSynapseWorkerConfig 9105 {
      worker_name = "media_repo1";
      worker_app = "synapse.app.media_repository";
      # Media listener for this worker. (Comment fixed: the original said
      # "the event persister needs a replication listener", copy-pasted from
      # an earlier block.)
      worker_listeners = [
        {
          type = "http";
          port = 8011;
          bind_address = "0.0.0.0";
          resources = [{ names = [ "media" ]; }];
        }
      ];
    });
in
{
  imports = [
    ./cleanup-synapse.nix
  ];

  options = {
    services.matrix-synapse-custom = {
      enable = mkEnableOption "Synapse, the reference Matrix homeserver";

      appServiceConfigFiles = mkOption {
        type = types.listOf types.path;
        default = [ ];
        description = ''
          A list of application service config file to use.
        '';
      };

      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/matrix-synapse";
        description = ''
          The directory where matrix-synapse stores its stateful data such as
          certificates, media and uploads.
        '';
      };

      registrationSharedSecretFile = mkOption {
        type = types.path;
        description = ''
          The path to a file that contains the shared registration secret.
        '';
      };

      sharedSecretAuthFile = mkOption {
        type = with types; nullOr path;
        default = null;
        description = ''
          The path to a file that contains the shared secret auth secret.
        '';
      };

      emailCfg = mkOption {
        type = with types; attrsOf anything;
        default = { };
        description = "The email configuration.";
      };
    };
  };

  config = mkIf cfg.enable {
    # Create a user and group for Synapse
    users.users.matrix-synapse = {
      group = "matrix-synapse";
      home = cfg.dataDir;
      createHome = true;
      shell = "${pkgs.bash}/bin/bash";
      uid = config.ids.uids.matrix-synapse;
    };

    users.groups.matrix-synapse = {
      gid = config.ids.gids.matrix-synapse;
    };

    systemd.targets.matrix-synapse = {
      description = "Synapse processes";
      after = [ "network.target" "postgresql.service" ];
      wantedBy = [ "multi-user.target" ];
    };

    # Run the main Synapse process
    systemd.services.matrix-synapse = {
      description = "Synapse Matrix homeserver";
      partOf = [ "matrix-synapse.target" ];
      wantedBy = [ "matrix-synapse.target" ];
      preStart = ''
        ${packageWithModules}/bin/synapse_homeserver \
          --config-path ${sharedConfigFile} \
          --keys-directory ${cfg.dataDir} \
          --generate-keys
      '';
      serviceConfig = {
        Type = "notify";
        User = "matrix-synapse";
        Group = "matrix-synapse";
        WorkingDirectory = cfg.dataDir;
        ExecStartPre = [
          ("+" + (pkgs.writeShellScript "matrix-synapse-fix-permissions" ''
            chown matrix-synapse:matrix-synapse ${cfg.dataDir}/homeserver.signing.key
            chmod 0600 ${cfg.dataDir}/homeserver.signing.key
          ''))
        ];
        ExecStart = ''
          ${packageWithModules}/bin/synapse_homeserver \
            --config-path ${sharedConfigFile} \
            --keys-directory ${cfg.dataDir}
        '';
        ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
        Restart = "on-failure";
        UMask = "0077";
      };
    };

    # Run the federation sender worker
    systemd.services.matrix-synapse-federation-sender1 = mkSynapseWorkerService {
      description = "Synapse Matrix federation sender 1";
      serviceConfig.ExecStart = ''
        ${packageWithModules}/bin/python -m synapse.app.federation_sender \
          --config-path ${sharedConfigFile} \
          --config-path ${federationSender1ConfigFile} \
          --keys-directory ${cfg.dataDir}
      '';
    };

    # Run the federation reader worker
    systemd.services.matrix-synapse-federation-reader1 = mkSynapseWorkerService {
      description = "Synapse Matrix federation reader 1";
      serviceConfig.ExecStart = ''
        ${packageWithModules}/bin/python -m synapse.app.generic_worker \
          --config-path ${sharedConfigFile} \
          --config-path ${federationReader1ConfigFile} \
          --keys-directory ${cfg.dataDir}
      '';
    };

    # Run the event persister worker
    systemd.services.matrix-synapse-event-persister1 = mkSynapseWorkerService {
      description = "Synapse Matrix event persister 1";
      serviceConfig.ExecStart = ''
        ${packageWithModules}/bin/python -m synapse.app.generic_worker \
          --config-path ${sharedConfigFile} \
          --config-path ${eventPersister1ConfigFile} \
          --keys-directory ${cfg.dataDir}
      '';
    };

    # Run the synchotron worker
    systemd.services.matrix-synapse-synchotron1 = mkSynapseWorkerService {
      description = "Synapse Matrix synchotron 1";
      serviceConfig.ExecStart = ''
        ${packageWithModules}/bin/python -m synapse.app.generic_worker \
          --config-path ${sharedConfigFile} \
          --config-path ${synchotron1ConfigFile} \
          --keys-directory ${cfg.dataDir}
      '';
    };

    # Run the media repo worker
    systemd.services.matrix-synapse-media-repo1 = mkSynapseWorkerService {
      description = "Synapse Matrix media repo 1";
      serviceConfig.ExecStart = ''
        ${packageWithModules}/bin/python -m synapse.app.media_repository \
          --config-path ${sharedConfigFile} \
          --config-path ${mediaRepo1ConfigFile} \
          --keys-directory ${cfg.dataDir}
      '';
    };

    # Make sure that Postgres is setup for Synapse.
    services.postgresql = {
      enable = true;
      initialScript = pkgs.writeText "synapse-init.sql" ''
        CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
        CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
          TEMPLATE template0
          LC_COLLATE = "C"
          LC_CTYPE = "C";
      '';
    };

    # Ensure that Redis is setup for Synapse.
    services.redis.enable = true;

    # Set up nginx to forward requests properly.
    services.nginx = {
      enable = true;
      virtualHosts = {
        ${config.networking.domain} = {
          enableACME = true;
          forceSSL = true;

          locations =
            let
              server = { "m.server" = "${matrixDomain}:443"; };
              client = {
                "m.homeserver" = { "base_url" = "https://${matrixDomain}"; };
                "m.identity_server" = { "base_url" = "https://vector.im"; };
              };
            in
            {
              "= /.well-known/matrix/server" = {
                extraConfig = ''
                  add_header Content-Type application/json;
                '';
                return = "200 '${builtins.toJSON server}'";
              };
              "= /.well-known/matrix/client" = {
                extraConfig = ''
                  add_header Content-Type application/json;
                  add_header Access-Control-Allow-Origin *;
                '';
                return = "200 '${builtins.toJSON client}'";
              };
            };
        };

        # Reverse proxy for Matrix client-server and server-server communication
        ${matrixDomain} = {
          enableACME = true;
          forceSSL = true;

          # If they access root, redirect to Element. If they access the API, then
          # forward on to Synapse.
          locations."/".return = "301 https://app.element.io";
          locations."/_matrix" = {
            proxyPass = "http://0.0.0.0:8008"; # without a trailing /
            extraConfig = ''
              access_log /var/log/nginx/matrix.access.log;
            '';
          };
          locations."/_matrix/federation/" = {
            proxyPass = "http://0.0.0.0:8009"; # without a trailing /
            extraConfig = ''
              access_log /var/log/nginx/matrix-federation.access.log;
            '';
          };
          locations."~ ^/_matrix/client/.*/(sync|events|initialSync)" = {
            proxyPass = "http://0.0.0.0:8010"; # without a trailing /
            extraConfig = ''
              access_log /var/log/nginx/matrix-synchotron.access.log;
            '';
          };
          locations."~ ^/(_matrix/media|_synapse/admin/v1/(purge_media_cache|(room|user)/.*/media.*|media/.*|quarantine_media/.*|users/.*/media))" = {
            proxyPass = "http://0.0.0.0:8011"; # without a trailing /
            extraConfig = ''
              access_log /var/log/nginx/matrix-media-repo.access.log;
            '';
          };
        };
      };
    };

    # Make sure that Prometheus is setup for Synapse.
    services.prometheus = {
      enable = true;
      scrapeConfigs = [
        {
          job_name = "synapse";
          scrape_interval = "15s";
          metrics_path = "/_synapse/metrics";
          static_configs = [
            {
              targets = [ "0.0.0.0:9009" ];
              labels = { instance = matrixDomain; job = "master"; index = "1"; };
            }
            {
              # Federation sender 1
              targets = [ "0.0.0.0:9101" ];
              labels = { instance = matrixDomain; job = "federation_sender"; index = "1"; };
            }
            {
              # Federation reader 1
              targets = [ "0.0.0.0:9102" ];
              labels = { instance = matrixDomain; job = "federation_reader"; index = "1"; };
            }
            {
              # Event persister 1
              targets = [ "0.0.0.0:9103" ];
              labels = { instance = matrixDomain; job = "event_persister"; index = "1"; };
            }
            {
              # Synchotron 1
              targets = [ "0.0.0.0:9104" ];
              labels = { instance = matrixDomain; job = "synchotron"; index = "1"; };
            }
            {
              # Media repo 1
              targets = [ "0.0.0.0:9105" ];
              labels = { instance = matrixDomain; job = "media_repo"; index = "1"; };
            }
          ];
        }
      ];
    };

    # Add a backup service for this module's data directory. The original
    # referenced config.services.matrix-synapse.dataDir — the upstream
    # module's option — which only coincidentally matches the default here
    # and silently diverges if cfg.dataDir is customized.
    services.backup.backups.matrix = {
      path = cfg.dataDir;
    };
  };
}

D modules/services/matrix/synapse/shared-config.nix => modules/services/matrix/synapse/shared-config.nix +0 -161
@@ 1,161 0,0 @@
# This is organized to match the sections in
# https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
{ config, lib, pkgs }: with lib;
let
  cfg = config.services.matrix-synapse-custom;
  yamlFormat = pkgs.formats.yaml { };

  logConfig = {
    version = 1;
    formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
    filters.context = {
      "()" = "synapse.util.logcontext.LoggingContextFilter";
      request = "";
    };
    handlers.journal = {
      class = "systemd.journal.JournalHandler";
      formatter = "journal_fmt";
      filters = [ "context" ];
      SYSLOG_IDENTIFIER = "synapse";
    };
    root = { level = "INFO"; handlers = [ "journal" ]; };
    loggers = {
      shared_secret_authenticator = { level = "INFO"; handlers = [ "journal" ]; };
    };
    disable_existing_loggers = false;
  };
in
{
  # Modules
  modules =
    if (cfg.sharedSecretAuthFile == null) then [ ] else [
      {
        module = "shared_secret_authenticator.SharedSecretAuthProvider";
        config = {
          shared_secret = removeSuffix "\n" (readFile cfg.sharedSecretAuthFile);
        };
      }
    ];

  # Server
  server_name = config.networking.domain;
  pid_file = "/run/matrix-synapse.pid";
  default_room_version = "9";
  public_baseurl = "https://matrix.${config.networking.domain}";
  listeners = [
    # CS API and Federation
    {
      type = "http";
      port = 8008;
      bind_address = "0.0.0.0";
      tls = false;
      x_forwarded = true;
      resources = [
        { names = [ "federation" "client" ]; compress = false; }
      ];
    }

    # Metrics
    {
      port = 9009;
      bind_address = "0.0.0.0";
      tls = false;
      type = "metrics";
    }

    # Replication
    {
      type = "http";
      port = 9093;
      bind_address = "127.0.0.1";
      resources = [{ names = [ "replication" ]; }];
    }
  ];

  # Caching
  event_cache_size = "25K";
  caches.global_factor = 1.0;

  # Database
  database = {
    name = "psycopg2";
    args = { user = "matrix-synapse"; database = "matrix-synapse"; };
  };

  # Logging
  log_config = yamlFormat.generate "matrix-synapse-log-config.yaml" logConfig;

  # Media store
  enable_media_repo = false;
  media_store_path = "${cfg.dataDir}/media";
  max_upload_size = "250M";
  url_preview_enabled = true;
  url_preview_ip_range_blacklist = [
    "127.0.0.0/8"
    "10.0.0.0/8"
    "172.16.0.0/12"
    "192.168.0.0/16"
    "100.64.0.0/10"
    "169.254.0.0/16"
    "::1/128"
    "fe80::/64"
    "fc00::/7"
  ];

  url_preview_url_blacklist = [
    # blacklist any URL with a username in its URI
    { username = "*"; }

    # Don't try previews for Linear.
    { netloc = "linear.app"; }
  ];

  # TURN
  # Configure coturn to point at the matrix.org servers.
  # TODO actually figure this out eventually
  turn_uris = [
    "turn:turn.matrix.org?transport=udp"
    "turn:turn.matrix.org?transport=tcp"
  ];
  turn_shared_secret = "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons";
  turn_user_lifetime = "1h";

  # Registration
  enable_registration = false;
  # NOTE(review): readFile embeds the secret in the rendered YAML at eval
  # time, which lands in the world-readable Nix store — consider an
  # out-of-store secrets mechanism instead.
  registration_shared_secret = removeSuffix "\n" (readFile cfg.registrationSharedSecretFile);

  # Metrics
  enable_metrics = true;
  report_stats = true;

  # API Configuration
  app_service_config_files = cfg.appServiceConfigFiles;

  # Signing Keys
  signing_key_path = "${cfg.dataDir}/homeserver.signing.key";
  trusted_key_servers = [
    { server_name = "matrix.org"; }
  ];
  suppress_key_server_warning = true;

  # Email
  email = cfg.emailCfg;

  # Workers
  send_federation = false;
  federation_sender_instances = [ "federation_sender1" ];
  instance_map = {
    event_persister1 = {
      host = "localhost";
      port = 9091;
    };
  };

  stream_writers = {
    events = "event_persister1";
  };

  redis = {
    enabled = true;
  };
}

D modules/services/mumble.nix => modules/services/mumble.nix +0 -50
@@ 1,50 0,0 @@
{ config, lib, ... }:
let
  certs = config.security.acme.certs;
  serverName = "voip.nevarro.space";
  # Directory holding the ACME certificate files for ${serverName}.
  certDirectory = "${certs.${serverName}.directory}";
  # (Removed an unused `port` binding that read config.services.murmur.port
  # but was never referenced.)

  murmurCfg = config.services.murmur;
in
lib.mkIf murmurCfg.enable {
  services.murmur = {
    registerHostname = serverName;
    registerName = "Nevarro";
    welcometext = ''
      Welcome to the Nevarro Mumble Server.

      If you are here for office hours, join the "Office Hours" channel. I will
      manually move you to a breakout room if necessary.
    '';

    # Keys
    sslCert = "${certDirectory}/fullchain.pem";
    sslKey = "${certDirectory}/key.pem";
    sslCa = "${certDirectory}/full.pem";
  };

  # Open up the ports for TCP and UDP
  # NOTE(review): 64738 is hard-coded; consider using murmurCfg.port so the
  # firewall tracks a customized port automatically.
  networking.firewall = {
    allowedTCPPorts = [ 64738 ];
    allowedUDPPorts = [ 64738 ];
  };

  # Use nginx to do the ACME verification for mumble.
  services.nginx.virtualHosts."${serverName}" = {
    enableACME = true;
    locations."/".return = "301 https://mumble.info";
  };

  # Let murmur read the cert via a shared group, and restart it when the
  # certificate renews.
  # https://github.com/NixOS/nixpkgs/issues/106068#issuecomment-739534275
  security.acme.certs.${serverName} = {
    group = "murmur-cert";
    postRun = "systemctl restart murmur.service";
  };
  users.groups.murmur-cert.members = [ "murmur" "nginx" ];

  # Add a backup service.
  services.backup.backups.murmur = {
    path = config.users.users.murmur.home;
  };
}

D modules/services/nginx.nix => modules/services/nginx.nix +0 -123
@@ 1,123 0,0 @@
{ config, lib, ... }: with lib; let
  # FQDN of this machine (hostName.domain); used for the status vhost below.
  # Only forced inside the (config.networking.domain != null) guard, so a null
  # domain never evaluates this interpolation.
  hostnameDomain = "${config.networking.hostName}.${config.networking.domain}";
  nginxCfg = config.services.nginx;
  websites = nginxCfg.websites;

  # Browser features to opt out of via the Permissions-Policy header.
  permissionsPolicyDisables = [
    "accelerometer"
    "camera"
    "geolocation"
    "gyroscope"
    "interest-cohort"
    "magnetometer"
    "microphone"
    "payment"
    "usb"
  ];

  # Security headers applied to every static website's "/" location.
  # https://securityheaders.com/?q=sumnerevans.com&followRedirects=on
  securityHeaders = mapAttrsToList (k: v: ''add_header ${k} "${v}";'') {
    # Disable using my website in FLoC calculations.
    # https://scotthelme.co.uk/goodbye-feature-policy-and-hello-permissions-policy/
    "Permissions-Policy" = concatMapStringsSep ", " (d: "${d}=()") permissionsPolicyDisables;
    "Strict-Transport-Security" = "max-age=31536000; includeSubDomains";
    "X-Frame-Options" = "SAMEORIGIN";
    "X-Content-Type-Options" = "nosniff";
    "Referrer-Policy" = "same-origin";
    "Content-Security-Policy" = "default-src https: 'unsafe-inline' 'unsafe-eval'";
  };
in
{
  options =
    let
      # One entry in services.nginx.websites: a static site served from
      # /var/www/<hostname>. (The unused `name` module argument was dropped.)
      websiteSubmodule = { ... }: {
        options = {
          hostname = mkOption {
            type = types.str;
            description = "The hostname of the website.";
          };
          extraLocations = mkOption {
            type = with types; attrsOf anything;
            default = { };
            description = "Additional nginx locations merged into the website's virtual host.";
          };
          excludeTerms = mkOption {
            type = with types; listOf str;
            default = [ ];
            description = "Terms to exclude; forwarded with the website definition to services.metrics.websites.";
          };
        };
      };
    in
    {
      services.nginx.websites = mkOption {
        type = with types; listOf (submodule websiteSubmodule);
        default = [ ];
      };
    };

  config = mkMerge [
    # Global nginx tuning, applied whenever nginx is enabled at all.
    (mkIf nginxCfg.enable {
      services.nginx = {
        enableReload = true;
        clientMaxBodySize = "250m";
        recommendedGzipSettings = true;
        recommendedOptimisation = true;
        recommendedProxySettings = true;
        recommendedTlsSettings = true;

        appendConfig = ''
          worker_processes auto;
        '';
        eventsConfig = ''
          worker_connections 8192;
        '';

        virtualHosts = (optionalAttrs (config.networking.domain != null) {
          ${hostnameDomain} = {
            forceSSL = true;
            enableACME = true;

            # Enable a status page and expose it.
            locations."/status".extraConfig = ''
              stub_status on;
              access_log off;
            '';
          };
        });
      };

      # Open up the HTTP/HTTPS ports.
      networking.firewall.allowedTCPPorts = [ 80 443 ];
    })

    # Static-website vhosts, one per entry in services.nginx.websites.
    (mkIf (websites != [ ]) {
      services.nginx = {
        virtualHosts =
          let
            websiteConfig = { hostname, extraLocations, ... }: {
              name = hostname;
              value = {
                forceSSL = true;
                enableACME = true;
                locations = extraLocations // {
                  "/" = {
                    root = "/var/www/${hostname}";
                    extraConfig = ''
                      # Put logs for each website in a separate log file.
                      access_log /var/log/nginx/${hostname}.access.log;

                      ${concatStringsSep "\n" securityHeaders}
                    '';
                  };
                };
              };
            };
          in
          listToAttrs (map websiteConfig websites);
      };

      # Add metrics displays for each of the websites.
      services.metrics.websites = websites;
    })
  ];
}

D modules/services/postgresql.nix => modules/services/postgresql.nix +0 -57
@@ 1,57 0,0 @@
{ config, lib, pkgs, ... }: with lib; mkMerge [
  (
    mkIf config.services.postgresql.enable {
      systemd.services.mkPostgresDataDir = {
        description = "Make sure the postgres data directory exists before booting the service.";
        wantedBy = [ "multi-user.target" ];
        before = [ "postgresql.service" ];
        serviceConfig = {
          ExecStart = pkgs.writeShellScript "ensure-dirs" ''
            mkdir -p ${config.services.postgresql.dataDir}