9fb6373708d5514bd4eade498bf6a32c54731ee6 — Kridsada Thanabulpong 2 months ago 7e89f58
Massive cleanup and move to gridns.xyz.
68 files changed, 1094 insertions(+), 1326 deletions(-)

D Vagrantfile
R ansible/ansible.cfg => ansible.cfg
D ansible/bootstrap.sh
D ansible/group_vars/all/main.yml
D ansible/host_vars/gridns-sg/main.yml
D ansible/playbook.yml
D ansible/roles/common/tasks/main.yml
D ansible/roles/dnscrypt-proxy/files/dnscrypt-proxy.toml
D ansible/roles/dnscrypt-proxy/files/log
D ansible/roles/dnscrypt-proxy/files/run
D ansible/roles/dnscrypt-proxy/handlers/main.yml
D ansible/roles/dnscrypt-proxy/tasks/deinstall.yml
D ansible/roles/doh-proxy/files/log
D ansible/roles/doh-proxy/files/run
D ansible/roles/doh-proxy/tasks/main.yml
D ansible/roles/nginx/files/dehydrated/periodic
D ansible/roles/nginx/files/nginx/run
D ansible/roles/nginx/handlers/main.yml
D ansible/roles/nginx/tasks/dehydrated.yml
D ansible/roles/nginx/tasks/deinstall.yml
D ansible/roles/nginx/tasks/main.yml
D ansible/roles/nginx/tasks/nginx.yml
D ansible/roles/nginx/templates/dehydrated/config.j2
D ansible/roles/nginx/templates/dehydrated/domains.txt.j2
D ansible/roles/nginx/templates/nginx/nginx.conf.j2
D ansible/roles/nginx/templates/nginx/site.conf.j2
D ansible/roles/nginx/vars/main.yml
D ansible/roles/unbound/files/blocklist/generate.sh
D ansible/roles/unbound/files/blocklist/periodic
D ansible/roles/unbound/files/unbound/log
D ansible/roles/unbound/files/unbound/run
D ansible/roles/unbound/files/unbound/unbound.conf
D ansible/roles/unbound/tasks/blocklist.yml
D ansible/roles/unbound/tasks/deinstall.yml
D ansible/roles/unbound/tasks/main.yml
A bootstrap.sh
R ansible/roles/unbound/files/blocklist/blocklist.txt => files/blocklist.txt
A group_vars/all/main.yml
A group_vars/all/vault.yml
A host_vars/default/main.yml
A host_vars/gridns-jp/main.yml
A host_vars/gridns-sg/main.yml
A hosts.yml
A playbook.yml
A requirements.yml
A roles/dnscrypt-proxy/defaults/main.yml
R ansible/roles/common/handlers/main.yml => roles/dnscrypt-proxy/handlers/main.yml
R ansible/roles/dnscrypt-proxy/tasks/main.yml => roles/dnscrypt-proxy/tasks/main.yml
A roles/dnscrypt-proxy/templates/dnscrypt-proxy.toml.j2
A roles/doh-server/defaults/main.yml
A roles/doh-server/handlers/main.yml
A roles/doh-server/tasks/main.yml
A roles/doh-server/templates/doh-server.conf.j2
A roles/unbound-blocklist/defaults/main.yml
A roles/unbound-blocklist/files/blocklist.sh
A roles/unbound-blocklist/handlers/main.yml
A roles/unbound-blocklist/tasks/main.yml
A roles/unbound/defaults/main.yml
R ansible/roles/unbound/handlers/main.yml => roles/unbound/handlers/main.yml
R ansible/roles/unbound/tasks/unbound.yml => roles/unbound/tasks/main.yml
A roles/unbound/templates/unbound.conf.j2
D terraform/do_instance.tf
D terraform/do_network.tf
D terraform/setup.tf
D terraform/terraform.py
D terraform/terraform.tfstate
D terraform/terraform.tfvars
D terraform/variables.tf
D Vagrantfile => Vagrantfile +0 -16
@@ 1,16 0,0 @@
- # -*- mode: ruby -*-
- # vi: set ft=ruby :
- 
- Vagrant.configure("2") do |config|
-   config.vm.box = "pxfs/freebsd-11.1"
-   config.vm.network "forwarded_port", guest: 80, host: 8080
-   config.vm.synced_folder ".", "/vagrant", disabled: true
- 
-   config.vm.provision :ansible do |ansible|
-     ansible.limit = "all"
-     ansible.playbook = "ansible/playbook.yml"
-     ansible.config_file = "ansible/ansible.cfg"
-     ansible.compatibility_mode = "2.0"
-     ansible.tags = ENV['ANSIBLE_TAGS']&.split
-   end
- end

R ansible/ansible.cfg => ansible.cfg +0 -0

        
D ansible/bootstrap.sh => ansible/bootstrap.sh +0 -7
@@ 1,7 0,0 @@
- #!/usr/bin/env bash
- BASE_DIR=$(cd "$(dirname "$0")/../" || exit; pwd -P)
- PLAYBOOK_FILE="${BASE_DIR}/ansible/playbook.yml"
- HOST_FILE="${BASE_DIR}/terraform/terraform.py"
- 
- export ANSIBLE_CONFIG="${BASE_DIR}/ansible/ansible.cfg"
- ansible-playbook "${PLAYBOOK_FILE}" -i "${HOST_FILE}" --ssh-common-args "-o StrictHostKeyChecking=no" "$@"

D ansible/group_vars/all/main.yml => ansible/group_vars/all/main.yml +0 -6
@@ 1,6 0,0 @@
- ---
- ansible_python_interpreter: /usr/local/bin/python3.6
- 
- site_ssl: no
- site_ssl_staging: yes
- site_domain: example.com

D ansible/host_vars/gridns-sg/main.yml => ansible/host_vars/gridns-sg/main.yml +0 -4
@@ 1,4 0,0 @@
- ---
- site_ssl: yes
- site_ssl_staging: no
- site_domain: sg.dns.grid.in.th
\ No newline at end of file

D ansible/playbook.yml => ansible/playbook.yml +0 -32
@@ 1,32 0,0 @@
- ---
- - hosts: all
-   gather_facts: false
-   tasks:
-     - name: bootstrap pkg
-       become: yes
-       raw: /usr/bin/env ASSUME_ALWAYS_YES=1 /usr/sbin/pkg bootstrap
-       ignore_errors: true
-       register: bootstrap_pkg
-       changed_when: bootstrap_pkg.stdout.find("already bootstrapped") == -1
- 
-     - name: update pkg
-       become: yes
-       raw: /usr/sbin/pkg update -f
-       ignore_errors: true
-       register: update_pkg
-       changed_when: update_pkg.stdout.find("are up to date") == -1
- 
-     - name: install python
-       become: yes
-       raw: /usr/sbin/pkg install -y python36
-       ignore_errors: true
-       register: install_python
-       changed_when: install_python.stdout.find("are already installed") == -1
- 
- - hosts: all
-   roles:
-     - { role: common, tags: ['common'] }
-     - { role: dnscrypt-proxy, tags: ['dnscrypt-proxy'] }
-     - { role: unbound, tags: ['unbound'] }
-     - { role: doh-proxy, tags: ['doh-proxy'] }
-     - { role: nginx, tags: ['nginx'] }

D ansible/roles/common/tasks/main.yml => ansible/roles/common/tasks/main.yml +0 -72
@@ 1,72 0,0 @@
- ---
- - name: install common dependencies
-   become: yes
-   pkgng: name={{item}} state=present
-   with_items:
-     - ca_root_nss
-     - git-lite
-     - curl
- 
- - name: install ntp
-   become: yes
-   pkgng: name=ntp state=present
- 
- - name: enable ntpd in rc.conf
-   become: yes
-   lineinfile:
-     dest: /etc/rc.conf
-     line: ntpd_enable=YES
-     state: present
-   register: ntpd_enabled
- 
- - name: set ntp time
-   become: yes
-   command: ntpd -qg
-   when: ntpd_enabled.changed
- 
- - name: enable ntpd
-   become: yes
-   service: name=ntpd enabled=yes state=started
- 
- - name: install pip
-   become: yes
-   shell: curl https://bootstrap.pypa.io/get-pip.py | /usr/local/bin/python3.6
-   args:
-     creates: /usr/local/bin/pip3.6
-     warn: no
- 
- - name: ensure lang
-   lineinfile:
-     path: "{{ansible_env['HOME']}}/.profile"
-     line: "LANG=en_US.UTF-8; export LANG"
- 
- - name: install s6
-   become: yes
-   pkgng: name={{item}} state=present
-   with_items:
-     - execline
-     - s6
- 
- - name: enable s6 in rc.conf
-   become: yes
-   lineinfile:
-     dest: /etc/rc.conf
-     line: s6_enable=YES
-     state: present
-   register: s6_enabled
- 
- - name: enable s6
-   become: yes
-   service: name=s6 enabled=yes state=started
-   when: s6_enabled.changed
- 
- - name: setup periodic logging
-   become: yes
-   lineinfile:
-     dest: /etc/periodic.conf
-     line: "{{item}}"
-     state: present
-   with_items:
-     - daily_output="/var/log/daily.log"
-     - weekly_output="/var/log/weekly.log"
-     - monthly_output="/var/log/monthly.log"

D ansible/roles/dnscrypt-proxy/files/dnscrypt-proxy.toml => ansible/roles/dnscrypt-proxy/files/dnscrypt-proxy.toml +0 -29
@@ 1,29 0,0 @@
- server_names = ['cloudflare', 'cloudflare-ipv6']
- listen_addresses = ['127.0.0.1:5353']
- max_clients = 250
- 
- ipv4_servers = true
- ipv6_servers = true
- dnscrypt_servers = true
- doh_servers = true
- 
- require_dnssec = true
- require_nolog = true
- require_nofilter = true
- 
- force_tcp = false
- timeout = 2500
- cert_refresh_delay = 240
- 
- fallback_resolver = '1.1.1.1:53'
- ignore_system_dns = true
- block_ipv6 = false
- 
- cache = false
- 
- [sources.public-resolvers]
- urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md']
- cache_file = 'public-resolvers.md'
- minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
- refresh_delay = 72
- prefix = ''

D ansible/roles/dnscrypt-proxy/files/log => ansible/roles/dnscrypt-proxy/files/log +0 -2
@@ 1,2 0,0 @@
- #!/usr/local/bin/execlineb -P
- s6-log -b n20 s10000000 /var/log/dnscrypt-proxy/
\ No newline at end of file

D ansible/roles/dnscrypt-proxy/files/run => ansible/roles/dnscrypt-proxy/files/run +0 -5
@@ 1,5 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- s6-setuidgid _dnscrypt-proxy
- fdmove -c 2 1
- /usr/local/sbin/dnscrypt-proxy -config /usr/local/etc/dnscrypt-proxy.toml
\ No newline at end of file

D ansible/roles/dnscrypt-proxy/handlers/main.yml => ansible/roles/dnscrypt-proxy/handlers/main.yml +0 -4
@@ 1,4 0,0 @@
- ---
- - name: restart dnscrypt-proxy
-   become: yes
-   command: s6-svc -t /var/service/dnscrypt-proxy

D ansible/roles/dnscrypt-proxy/tasks/deinstall.yml => ansible/roles/dnscrypt-proxy/tasks/deinstall.yml +0 -13
@@ 1,13 0,0 @@
- ---
- - name: disable dnscrypt-proxy in rc.conf
-   become: yes
-   lineinfile:
-     dest: /etc/rc.conf
-     line: dnscrypt_proxy_enable=YES
-     state: absent
-   register: dnscrypt_deinstall
- 
- - name: disable dnscrypt-proxy
-   become: yes
-   service: name=dnscrypt-proxy enabled=yes state=stopped
-   when: dnscrypt_deinstall.changed

D ansible/roles/doh-proxy/files/log => ansible/roles/doh-proxy/files/log +0 -2
@@ 1,2 0,0 @@
- #!/usr/local/bin/execlineb -P
- s6-log -b n20 -\[DNS\] s10000000 /var/log/doh-proxy/
\ No newline at end of file

D ansible/roles/doh-proxy/files/run => ansible/roles/doh-proxy/files/run +0 -5
@@ 1,5 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- s6-setuidgid www
- fdmove -c 2 1
- doh-httpproxy --upstream-resolver=127.0.0.1 --port=8080 --listen-address=127.0.0.1 --level=WARNING

D ansible/roles/doh-proxy/tasks/main.yml => ansible/roles/doh-proxy/tasks/main.yml +0 -33
@@ 1,33 0,0 @@
- ---
- - name: install doh-proxy
-   become: yes
-   pip:
-     name: "doh-proxy"
-     state: latest
- 
- - name: ensure doh-proxy service directory
-   become: yes
-   file:
-     path: "{{item}}"
-     state: directory
-   with_items:
-     - /var/service/doh-proxy
-     - /var/service/doh-proxy/log
- 
- - name: install doh-proxy service
-   become: yes
-   copy:
-     src: run
-     dest: /var/service/doh-proxy/run
-     mode: 0555
-   notify:
-     - rescan s6
- 
- - name: install doh-proxy log service
-   become: yes
-   copy:
-     src: log
-     dest: /var/service/doh-proxy/log/run
-     mode: 0555
-   notify:
-     - rescan s6

D ansible/roles/nginx/files/dehydrated/periodic => ansible/roles/nginx/files/dehydrated/periodic +0 -13
@@ 1,13 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- /usr/local/bin/emptyenv
- env PATH=/usr/local/bin:/etc:/bin:/sbin:/usr/bin:/usr/sbin
- 
- foreground {
-   s6-setuidgid www
-   foreground {
-     /usr/local/bin/dehydrated -c
-   }
- }
- 
- s6-svc -h /var/service/nginx
\ No newline at end of file

D ansible/roles/nginx/files/nginx/run => ansible/roles/nginx/files/nginx/run +0 -4
@@ 1,4 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- fdmove -c 2 1
- /usr/local/sbin/nginx
\ No newline at end of file

D ansible/roles/nginx/handlers/main.yml => ansible/roles/nginx/handlers/main.yml +0 -8
@@ 1,8 0,0 @@
- ---
- - name: restart nginx
-   become: yes
-   command: s6-svc -t /var/service/nginx
- 
- - name: reload nginx
-   become: yes
-   command: s6-svc -h /var/service/nginx

D ansible/roles/nginx/tasks/dehydrated.yml => ansible/roles/nginx/tasks/dehydrated.yml +0 -49
@@ 1,49 0,0 @@
- ---
- - name: install dehydrated
-   become: yes
-   pkgng: name=dehydrated state=present
- 
- - name: ensure dehydrated directory
-   become: yes
-   file:
-     path: "{{item}}"
-     owner: www
-     group: www
-     state: directory
-   with_items:
-     - /usr/local/etc/dehydrated
-     - /usr/local/www/dehydrated
- 
- - name: configure dehydrated
-   become: yes
-   template:
-     src: "dehydrated/{{item}}.j2"
-     dest: "/usr/local/etc/dehydrated/{{item}}"
-   with_items:
-     - config
-     - domains.txt
- 
- - name: setup letsencrypt account
-   become: yes
-   become_user: www
-   command: /usr/local/bin/dehydrated --register --accept-terms
-   args:
-     creates: /usr/local/etc/dehydrated/accounts
-   notify:
-     - restart nginx
- 
- - name: setup initial cert
-   become: yes
-   become_user: www
-   command: /usr/local/bin/dehydrated -c
-   args:
-     creates: "/usr/local/etc/dehydrated/certs/{{site_domain}}"
-   notify:
-     - restart nginx
- 
- - name: setup dehydrated periodic
-   become: yes
-   copy:
-     src: "dehydrated/periodic"
-     dest: "/usr/local/etc/periodic/weekly/900.dehydrated"
-     mode: 0555

D ansible/roles/nginx/tasks/deinstall.yml => ansible/roles/nginx/tasks/deinstall.yml +0 -38
@@ 1,38 0,0 @@
- ---
- - name: disable nginx in rc.conf
-   become: yes
-   lineinfile:
-     dest: /etc/rc.conf
-     line: nginx_enable=YES
-     state: absent
-   register: nginx_deinstall
- 
- - name: disable nginx
-   become: yes
-   service: name=nginx enabled=yes state=stopped
-   when: nginx_deinstall.changed
- 
- - name: uninstall nginx periodic
-   become: yes
-   file:
-     path: "/usr/local/etc/periodic/weekly/900.nginx"
-     state: absent
- 
- - name: disable nginx in periodic.conf
-   become: yes
-   lineinfile:
-     dest: /etc/periodic.conf
-     line: "{{item}}"
-     state: absent
-   with_items:
-     - weekly_nginx_reload_enable="YES"
- 
- - name: disable dehydrated in periodic.conf
-   become: yes
-   lineinfile:
-     dest: /etc/periodic.conf
-     line: "{{item}}"
-     state: absent
-   with_items:
-     - weekly_dehydrated_enable="YES"
-     - weekly_dehydrated_user="www"

D ansible/roles/nginx/tasks/main.yml => ansible/roles/nginx/tasks/main.yml +0 -5
@@ 1,5 0,0 @@
- ---
- - import_tasks: "deinstall.yml"
- - import_tasks: "nginx.yml"
- - import_tasks: "dehydrated.yml"
-   when: site_ssl

D ansible/roles/nginx/tasks/nginx.yml => ansible/roles/nginx/tasks/nginx.yml +0 -44
@@ 1,44 0,0 @@
- ---
- - name: install nginx
-   become: yes
-   pkgng: name=nginx state=present
- 
- - name: ensure nginx directory
-   become: yes
-   file:
-     path: "{{item}}"
-     state: directory
-   with_items:
-     - /usr/local/etc/nginx
-     - /usr/local/etc/nginx/conf.d
- 
- - name: configure nginx
-   become: yes
-   template:
-     src: "nginx/nginx.conf.j2"
-     dest: "/usr/local/etc/nginx/nginx.conf"
-   notify:
-     - reload nginx
- 
- - name: configure nginx site
-   become: yes
-   template:
-     src: "nginx/site.conf.j2"
-     dest: "/usr/local/etc/nginx/conf.d/site.conf"
-   notify:
-     - reload nginx
- 
- - name: ensure nginx service directory
-   become: yes
-   file:
-     path: /var/service/nginx
-     state: directory
- 
- - name: install nginx service
-   become: yes
-   copy:
-     src: nginx/run
-     dest: /var/service/nginx/run
-     mode: 0555
-   notify:
-     - rescan s6

D ansible/roles/nginx/templates/dehydrated/config.j2 => ansible/roles/nginx/templates/dehydrated/config.j2 +0 -4
@@ 1,4 0,0 @@
- {% if site_ssl_staging is defined and site_ssl_staging %}
- CA="https://acme-staging.api.letsencrypt.org/directory"
- CA_TERMS="https://acme-staging.api.letsencrypt.org/terms"
- {% endif %}
\ No newline at end of file

D ansible/roles/nginx/templates/dehydrated/domains.txt.j2 => ansible/roles/nginx/templates/dehydrated/domains.txt.j2 +0 -1
@@ 1,1 0,0 @@
- {{ site_domain }}

D ansible/roles/nginx/templates/nginx/nginx.conf.j2 => ansible/roles/nginx/templates/nginx/nginx.conf.j2 +0 -41
@@ 1,41 0,0 @@
- user www;
- daemon off;
- worker_processes {{ansible_processor_vcpus|default(ansible_processor_count)}};
- pid /var/run/nginx.pid;
- 
- events {
-   worker_connections 256;
- }
- 
- http {
-   include mime.types;
-   default_type application/octet-stream;
- 
-   server_names_hash_bucket_size 64;
-   types_hash_max_size 2048;
-   keepalive_timeout 120;
- 
-   tcp_nodelay on;
-   tcp_nopush on;
-   sendfile on;
- 
-   gzip on;
-   gzip_types application/json text/css text/javascript;
-   gzip_proxied any;
-   gzip_buffers 16 8k;
-   gzip_comp_level 9;
-   gzip_min_length 16;
- 
-   client_max_body_size 128m;
- 
-   server {
-     listen 80;
- 
-     location / {
-       deny all;
-       return 403;
-     }
-   }
- 
-   include conf.d/*.conf;
- }

D ansible/roles/nginx/templates/nginx/site.conf.j2 => ansible/roles/nginx/templates/nginx/site.conf.j2 +0 -53
@@ 1,53 0,0 @@
- {% if site_ssl %}
- server {
-   listen 80;
-   server_name {{site_domain}};
-   access_log off;
- 
-   location /.well-known/acme-challenge {
-     alias /usr/local/www/dehydrated;
-   }
- 
-   location / {
-     return 301 https://$server_name$request_uri;
-   }
- }
- 
- {% endif %}
- server {
-   server_name {{site_domain}};
-   root {{site_root_web}};
-   access_log off;
- {% if site_ssl %}
-   listen 443 ssl http2;
- 
-   ssl_certificate /usr/local/etc/dehydrated/certs/{{site_domain}}/fullchain.pem;
-   ssl_certificate_key /usr/local/etc/dehydrated/certs/{{site_domain}}/privkey.pem;
-   ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
-   ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
-   ssl_prefer_server_ciphers on;
- {% else %}
-   listen 80;
- 
-   location /.well-known/acme-challenge {
-     alias /usr/local/www/dehydrated;
-   }
- {% endif %}
- 
-   location ~ /(\.git|\.ht) {
-     deny all;
-     return 404;
-   }
- 
-   location / {
-     try_files $uri $uri/ @proxy;
-   }
- 
-   location @proxy {
-     proxy_set_header Host $http_host;
-     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-     proxy_redirect off;
-     proxy_buffering off;
-     proxy_pass http://127.0.0.1:8080;
-   }
- }
\ No newline at end of file

D ansible/roles/nginx/vars/main.yml => ansible/roles/nginx/vars/main.yml +0 -2
@@ 1,2 0,0 @@
- ---
- site_root_web: "/srv/http/{{site_domain}}"

D ansible/roles/unbound/files/blocklist/generate.sh => ansible/roles/unbound/files/blocklist/generate.sh +0 -71
@@ 1,71 0,0 @@
- #!/bin/sh
- 
- #
- # Parsing args
- #
- 
- while [ "$#" -gt 0 ]; do
-     case $1 in
-         -b|--blocklist) blocklist="$2"; shift;;
-         -o|--output)    output="$2"; shift;;
-         *) echo "$0: illegal option -- $1"; exit 1;;
-     esac; shift
- done
- 
- #
- # Sanity check
- #
- 
- if [ ! -f "$blocklist" ]; then
-     echo "$0: expected --blocklist to be a path to a file."
-     exit 1
- fi
- 
- if [ ! "$output" ]; then
-     echo "$0: expected --output to be present."
-     exit 1
- fi
- 
- if [ -x "$(command -v curl)" ]; then
-     fetch_cmd="curl -sL"
- elif [ -x "$(command -v fetch)" ]; then
-     fetch_cmd="fetch -qo -"
- else
-     echo "$0: expected either fetch or curl to be installed."
-     exit 1
- fi
- 
- #
- # Blocklist parsing
- #
- 
- parse_blocklist() {
-     awk -F \# '$1 != "" { print $1 }' "$1" | while read -r line; do
-         # pass line as argument to parse function
-         # shellcheck disable=SC2086
-         eval parse_blocklist_line $line
-     done | sort -u
- }
- 
- parse_blocklist_line() {
-     case "$1" in
-         hostfile)   parse_hostfile "$2" "$3";;
-         domainonly) parse_domainonly "$2";;
-     esac
- }
- 
- parse_hostfile() {
-     eval "$fetch_cmd" "$2" | awk "\$1 == \"$1\" { print tolower(\$2) }" | tr -d "\\r"
- }
- 
- parse_domainonly() {
-     eval "$fetch_cmd" "$1" | awk -F \# '$1 != "" { print tolower($1) }' | tr -d "\\r"
- }
- 
- #
- # Main
- #
- 
- parse_blocklist "$blocklist" | awk '{
-     print "local-zone: \""$1"\" static"
- }' | tee "$output" > /dev/null

D ansible/roles/unbound/files/blocklist/periodic => ansible/roles/unbound/files/blocklist/periodic +0 -13
@@ 1,13 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- /usr/local/bin/emptyenv
- env PATH=/usr/local/bin:/etc:/bin:/sbin:/usr/bin:/usr/sbin
- 
- foreground {
-   s6-setuidgid nobody
-   foreground {
-     sh /usr/local/etc/unbound/blocklist/generate.sh --blocklist /usr/local/etc/unbound/blocklist/blocklist.txt --output /usr/local/etc/unbound/blocklist/blocklist.conf
-   }
- }
- 
- s6-svc -t /var/service/unbound
\ No newline at end of file

D ansible/roles/unbound/files/unbound/log => ansible/roles/unbound/files/unbound/log +0 -2
@@ 1,2 0,0 @@
- #!/usr/local/bin/execlineb -P
- s6-log -b n20 s10000000 /var/log/unbound/
\ No newline at end of file

D ansible/roles/unbound/files/unbound/run => ansible/roles/unbound/files/unbound/run +0 -4
@@ 1,4 0,0 @@
- #!/usr/local/bin/execlineb -P
- 
- fdmove -c 2 1
- /usr/local/sbin/unbound -d -c /usr/local/etc/unbound/unbound.conf
\ No newline at end of file

D ansible/roles/unbound/files/unbound/unbound.conf => ansible/roles/unbound/files/unbound/unbound.conf +0 -40
@@ 1,40 0,0 @@
- server:
-     verbosity: 0
- 
-     interface: 0.0.0.0
-     port: 53
-     ip-transparent: yes
-     interface-automatic: yes
-     access-control: 0.0.0.0/0 allow
- 
-     num-threads: 2
-     msg-cache-slabs: 2
-     rrset-cache-slabs: 2
-     infra-cache-slabs: 2
-     key-cache-slabs: 2
- 
-     msg-cache-size: 16m
-     rrset-cache-size: 16m
-     key-cache-size: 16m
-     neg-cache-size: 16m
- 
-     infra-cache-min-rtt: 1500
-     qname-minimisation: yes
-     use-caps-for-id: yes
-     hide-identity: yes
-     hide-version: yes
- 
-     auto-trust-anchor-file: /usr/local/etc/unbound/root.key
-     dlv-anchor-file: /usr/local/etc/unbound/dlv.isc.org.key
-     root-hints: /usr/local/etc/unbound/named.cache
- 
-     harden-glue: yes
-     harden-dnssec-stripped: yes
-     harden-below-nxdomain: yes
- 
-     do-not-query-localhost: no
-     include: /usr/local/etc/unbound/blocklist/blocklist.conf
- 
- forward-zone:
-     name: "."
-     forward-addr: 127.0.0.1@5353
\ No newline at end of file

D ansible/roles/unbound/tasks/blocklist.yml => ansible/roles/unbound/tasks/blocklist.yml +0 -46
@@ 1,46 0,0 @@
- ---
- - name: ensure blocklist directory
-   become: yes
-   file:
-     path: "{{item}}"
-     owner: nobody
-     group: nobody
-     state: directory
-   with_items:
-     - /usr/local/etc/unbound/blocklist
- 
- - name: install blocklist
-   become: yes
-   copy:
-     src: "blocklist/{{item}}"
-     dest: "/usr/local/etc/unbound/blocklist/{{item}}"
-     owner: nobody
-     group: nobody
-   with_items:
-     - blocklist.txt
-     - generate.sh
-   register: blocklist_installed
- 
- - name: generate initial blocklist
-   become: yes
-   command: |
-     sh /usr/local/etc/unbound/blocklist/generate.sh \
-         --blocklist /usr/local/etc/unbound/blocklist/blocklist.txt \
-         --output /usr/local/etc/unbound/blocklist/blocklist.conf
-   when: blocklist_installed.changed
-   notify:
-     - restart unbound
- 
- - name: chown initial blocklist
-   become: yes
-   file:
-     path: /usr/local/etc/unbound/blocklist/blocklist.conf
-     owner: nobody
-     group: nobody
- 
- - name: setup blocklist periodic
-   become: yes
-   copy:
-     src: "blocklist/periodic"
-     dest: "/usr/local/etc/periodic/daily/900.blocklist"
-     mode: 0555

D ansible/roles/unbound/tasks/deinstall.yml => ansible/roles/unbound/tasks/deinstall.yml +0 -31
@@ 1,31 0,0 @@
- ---
- - name: disable unbound in rc.conf
-   become: yes
-   lineinfile:
-     dest: /etc/rc.conf
-     line: unbound_enable=YES
-     state: absent
-   register: unbound_deinstall
- 
- - name: disable unbound
-   become: yes
-   service: name=unbound enabled=yes state=stopped
-   when: unbound_deinstall.changed
- 
- - name: uninstall blocklist periodic
-   become: yes
-   file:
-     dest: "/usr/local/etc/periodic/daily/000.blocklist"
-     state: absent
- 
- - name: disable blocklist in periodic.conf
-   become: yes
-   lineinfile:
-     dest: /etc/periodic.conf
-     line: "{{item}}"
-     state: absent
-   with_items:
-     - daily_blocklist_enable="YES"
-     - daily_blocklist_user="www"
-     - daily_blocklist_file="/usr/local/etc/unbound/blocklist/blocklist.txt"
-     - daily_blocklist_out="/usr/local/etc/unbound/blocklist/blocklist.conf"

D ansible/roles/unbound/tasks/main.yml => ansible/roles/unbound/tasks/main.yml +0 -4
@@ 1,4 0,0 @@
- ---
- - import_tasks: deinstall.yml
- - import_tasks: blocklist.yml
- - import_tasks: unbound.yml

A bootstrap.sh => bootstrap.sh +57 -0
@@ 0,0 1,57 @@
+ #!/bin/sh
+ 
+ _base_dir=$(cd "$(dirname "$0")/" || exit; pwd -P)
+ 
+ _inventory_file="$_base_dir/hosts.yml"
+ _requirements_file="$_base_dir/requirements.yml"
+ _playbook_file="$_base_dir/playbook.yml"
+ 
+ 
+ ## Prepare
+ ##
+ 
+ # FreeBSD packages may suffix the Ansible binaries with the Python version (e.g. ansible-playbook-3.6).
+ for v in "" "-2.7" "-3.6" "-3.7"; do
+     if hash "ansible$v" 2>/dev/null; then
+         _ansible_playbook="ansible-playbook$v"
+         _ansible_galaxy="ansible-galaxy$v"
+         break
+     fi
+ done
+ 
+ if [ ! "$_ansible_playbook" ] || [ ! "$_ansible_galaxy" ]; then
+     printf "No known Ansible versions found.\\n"
+     exit 1
+ fi
+ 
+ if ! hash pass 2>/dev/null; then
+     printf "Pass is required to be installed.\\n"
+     exit 1
+ fi
+ 
+ case "$FORCE_UPDATE_GALAXY" in
+     1 | y* | Y* | t* | T* )
+         _ansible_galaxy_args="--force";;
+     * )
+         _ansible_galaxy_args="";;
+ esac
+ 
+ 
+ ## Main
+ ##
+ 
+ if ! "$_ansible_galaxy" install $_ansible_galaxy_args -r "$_requirements_file"; then
+     printf "Cannot install Galaxy roles.\\n"
+     exit 1
+ fi
+ 
+ pass Ansible/gridns.xyz |
+     exec \
+         env ANSIBLE_CONFIG="$_base_dir/ansible.cfg" \
+         "$_ansible_playbook" \
+         "$_playbook_file" \
+         -i "$_inventory_file" \
+         --user=freebsd \
+         --vault-password-file=/dev/stdin \
+         --ssh-common-args="-o StrictHostKeyChecking=no" \
+         "$@"

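Usage sketch: bootstrap.sh reads the vault password from the `pass` entry Ansible/gridns.xyz and forwards any extra arguments straight to ansible-playbook, so the usual check/limit/tag flags apply (the limit and tag values below are just examples matching hosts.yml and playbook.yml):

    ./bootstrap.sh --check                      # dry run against all hosts
    ./bootstrap.sh --limit dns --tags unbound   # only the unbound role on the dns group
    FORCE_UPDATE_GALAXY=1 ./bootstrap.sh        # force-refresh the Galaxy roles first
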
R ansible/roles/unbound/files/blocklist/blocklist.txt => files/blocklist.txt +1 -0
@@ 6,6 6,7 @@ #
  # A line begin with # will be ignored.
  hostfile    0.0.0.0    https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+ hostfile    0.0.0.0    https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
  hostfile    127.0.0.1  http://sysctl.org/cameleon/hosts
  hostfile    127.0.0.1  https://hosts-file.net/ad_servers.txt
  hostfile    127.0.0.1  https://sites.google.com/site/hosts2ch/ja

A group_vars/all/main.yml => group_vars/all/main.yml +154 -0
@@ 0,0 1,154 @@
+ ---
+ ## Local configurations
+ ##
+ 
+ ansible_python_interpreter: /usr/local/bin/python3.6
+ 
+ 
+ ## System tuning
+ ##
+ 
+ loaded_modules:
+   - zfs
+ 
+ machdep_disable_tsc_calibration: 1
+ 
+ kern_timecounter_invariant_tsc: 1
+ 
+ 
+ ## Roles configurations
+ ##
+ 
+ # Dehydrated
+ #
+ 
+ dehydrated_domains:
+   - "{{hostname}}"
+ 
+ dehydrated_postcmds:
+   - s6-svc -h /var/service/nginx
+ 
+ dehydrated_staging: no
+ 
+ # DNSCrypt-Proxy
+ #
+ 
+ dnscrypt_proxy_addresses:
+   - "[::1]:5353"
+ 
+ dnscrypt_proxy_cache: no
+ 
+ dnscrypt_proxy_drop_privileges_early: yes
+ 
+ dnscrypt_proxy_fallback_resolver: "[2606:4700:4700::1111]:53"
+ 
+ dnscrypt_proxy_use_servers:
+   - ipv6
+   - dnscrypt
+   - doh
+ 
+ dnscrypt_proxy_upstreams:
+   - cloudflare-ipv6
+ 
+ # DOH-server
+ #
+ 
+ doh_server_addresses:
+   - "[::1]:8053"
+ 
+ doh_server_upstream:
+   - "[::1]:1053"
+ 
+ 
+ # Nginx
+ #
+ 
+ nginx_access_log: /dev/null
+ 
+ nginx_drop_privileges_early: yes
+ 
+ nginx_error_log_level: warn
+ 
+ nginx_http_port: 1080
+ 
+ nginx_https_port: 1443
+ 
+ nginx_ipv6: yes
+ 
+ nginx_sites:
+   - secure: no
+     default: yes
+     locations:
+       - path: /
+         redirect_match: "^/(.*)$"
+         redirect_target: "https://{{hostname}}/$1"
+   - secure: yes
+     dehydrated_cert: "{{hostname}}"
+     domains:
+       - "{{hostname}}"
+     locations:
+       - path: /dns-query
+         proxy_name: doh
+         proxy_backends:
+           - "[::1]:8053"
+       - path: /
+         redirect_match: "^/(.*)$"
+         redirect_target: "https://gridns.xyz/$1"
+ 
+ # Ntpd
+ #
+ 
+ ntpd_pools:
+   - asia.pool.ntp.org
+ 
+ ntpd_servers:
+ 
+ # PF
+ #
+ 
+ pf_allow_ipv6: yes
+ 
+ pf_ext_iface: vtnet0
+ 
+ pf_ipv4_forwarded_ports:
+   - { port: "80", proto: tcp, address: "{{ipv4_address}}", dest_port: "1080" }
+   - { port: "443", proto: tcp, address: "{{ipv4_address}}", dest_port: "1443" }
+ 
+ pf_ipv6_forwarded_ports:
+   - { port: "80", proto: tcp, address: "{{ipv6_address}}", dest_port: "1080" }
+   - { port: "443", proto: tcp, address: "{{ipv6_address}}", dest_port: "1443" }
+ 
+ # Unbound
+ #
+ 
+ unbound_access_control:
+   - "::1/128 allow"
+ 
+ unbound_addresses:
+   - "::1"
+ 
+ unbound_forward_zones:
+   - name: .
+     addresses:
+       - "::1@5353"
+ 
+ unbound_drop_privileges_early: yes
+ 
+ unbound_do_not_query_localhost: no
+ 
+ unbound_includes:
+   - /usr/local/etc/unbound-blocklist/blocklist.conf
+ 
+ unbound_infra_cache_min_rtt: 1500
+ 
+ unbound_key_cache_size: 16m
+ 
+ unbound_msg_cache_size: 16m
+ 
+ unbound_neg_cache_size: 16m
+ 
+ unbound_port: 1053
+ 
+ unbound_rrset_cache_size: 16m
+ 
+ unbound_verbosity: 0

A group_vars/all/vault.yml => group_vars/all/vault.yml +26 -0
@@ 0,0 1,26 @@
+ $ANSIBLE_VAULT;1.1;AES256
+ 66643065343936393637356363346561633666373030363734663633363262393837633437653338
+ 3462383433343766393237373036653435373361313964630a353038336539646634643561326563
+ 31626232653938396635383939646165633033343536323032336431663564616261323463313530
+ 6332636339623462340a353639613435663163353063383362383837303737653265656564383636
+ 34313064313633343565663330623531393763623033623631323363636665613866316262363365
+ 33633062346436626365343961316137326232613738623534613432666161386164386366626536
+ 33386136636331353233646639346662313235613233633533316135616132643232323230303162
+ 38333636393831393839363039653664393230323132346336383566313630656634386338363135
+ 36336138313436383334663930656561616437316235353538643465663165623862323265376161
+ 65353964623636356233653634316333303332376165316438323834653835663966613332636234
+ 38373030356532646431623162633935306531353633356162393637346634613335363638346138
+ 66646337346461646531663431636336393937396162336266333033633837663230663066353435
+ 33366361376337383362643535613833323963393437343534626136343335366165366366613664
+ 33616465366631343530366337363764653930323735366261626266353233656461666134353235
+ 65643533363232393063663263653433626532623231383432306564666136343364653966623134
+ 39313637643238366164303661383761636661303539393262386635643135626666636564376461
+ 33636164323634623434396336323539386366656539653434303164626133643630333932623962
+ 30663462343462613736633466343735623936653732383762336661383430383135373732303335
+ 61363332303736336133363533323264383630613661626239383564336431343834666139333934
+ 39333538656230383630343739653861616364376330646135336364343030643137366561306464
+ 61353030353237343835306163326266393464376663666261663930323339653238366361396434
+ 38396133343036623137623564656438363036346632356635363236666166356233616462316331
+ 62326632653233336264323766643338316461653566643435656637386338366338633830663939
+ 39366230366636653164663433383265643233336431303133363162663333356665623562666364
+ 30356331616630383730363762353331623831623661313664653461313866613066

A host_vars/default/main.yml => host_vars/default/main.yml +7 -0
@@ 0,0 1,7 @@
+ ---
+ hostname: dev.gridns.xyz
+ 
+ # Dehydrated
+ #
+ 
+ dehydrated_staging: yes

A host_vars/gridns-jp/main.yml => host_vars/gridns-jp/main.yml +4 -0
@@ 0,0 1,4 @@
+ ---
+ hostname: jp.gridns.xyz
+ 
+ freebsd_pkg_host: pkg0.twn.freebsd.org

A host_vars/gridns-sg/main.yml => host_vars/gridns-sg/main.yml +4 -0
@@ 0,0 1,4 @@
+ ---
+ hostname: sg.gridns.xyz
+ 
+ freebsd_pkg_host: pkg0.twn.freebsd.org

A hosts.yml => hosts.yml +22 -0
@@ 0,0 1,22 @@
+ ---
+ all:
+   hosts:
+     gridns-sg:
+       ansible_host: sg.gridns.xyz
+       ipv4_address: 139.162.3.123
+       ipv6_address: 2400:8901::f03c:91ff:feed:8d47
+       primary_iface: vtnet0
+     gridns-jp:
+       ansible_host: jp.gridns.xyz
+       ipv4_address: 172.105.241.93
+       ipv6_address: 2400:8902::f03c:91ff:feed:220b
+       primary_iface: vtnet0
+   children:
+     dns:
+       hosts:
+         gridns-sg:
+         gridns-jp:
+     http:
+       hosts:
+         gridns-sg:
+         gridns-jp:

A playbook.yml => playbook.yml +112 -0
@@ 0,0 1,112 @@
+ ---
+ ## Bootstrap
+ ##
+ 
+ - hosts: all
+   gather_facts: false
+   tags:
+     - common
+ 
+   tasks:
+     - name: bootstrap python installation
+       become: yes
+       raw: |
+         echo 'resolv_conf="/dev/null"' > /etc/resolvconf.conf
+         echo 'nameserver 1.1.1.1' > /etc/resolv.conf
+         echo 'nameserver 1.0.0.1' >> /etc/resolv.conf
+         mkdir -p /usr/local/etc/pkg/repos
+ 
+         (
+           echo 'FreeBSD: {'
+           echo '  url: "pkg+http://{{freebsd_pkg_host}}/${ABI}/quarterly",'
+           echo '  mirror_type: "srv",'
+           echo '  signature_type: "fingerprints",'
+           echo '  fingerprints: "/usr/share/keys/pkg",'
+           echo '  enabled: yes'
+           echo '}'
+         ) > /usr/local/etc/pkg/repos/FreeBSD.conf
+ 
+         if ! test -f /usr/local/bin/python3.6; then
+           env ASSUME_ALWAYS_YES=1 /usr/sbin/pkg bootstrap
+           pkg-static update -f
+           pkg-static install -y python36
+         else
+           pkg-static update
+           exit 128
+         fi
+       register: python_bootstrap
+       failed_when: python_bootstrap.rc != 0 and python_bootstrap.rc != 128
+       changed_when: python_bootstrap.rc != 128
+ 
+     - setup:
+ 
+     - name: install command line tools
+       become: yes
+       pkgng:
+         name:
+           - ca_root_nss
+           - curl
+           - git-lite
+         state: latest
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-tuning
+       tags:
+         - tuning
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-pf
+       tags:
+         - pf
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-s6
+       tags:
+         - s6
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-ntpd
+       tags:
+         - ntpd
+ 
+ 
+ ## DNS
+ ##
+ 
+ - hosts: dns
+   tasks:
+     - import_role:
+         name: dnscrypt-proxy
+       tags:
+         - dnscrypt-proxy
+ 
+     - import_role:
+         name: unbound-blocklist
+       tags:
+         - unbound-blocklist
+ 
+     - import_role:
+         name: unbound
+       tags:
+         - unbound
+ 
+ 
+ ## HTTP
+ ##
+ 
+ - hosts: http
+   tasks:
+     - import_role:
+         name: doh-server
+       tags:
+         - doh-server
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-s6-dehydrated
+       tags:
+         - dehydrated
+ 
+     - import_role:
+         name: freebsd-roles/freebsd-s6-nginx
+       tags:
+         - nginx

A requirements.yml => requirements.yml +4 -0
@@ 0,0 1,4 @@
+ ---
+ - src: git+https://git.sr.ht/~sirn/ansible-freebsd
+   version: master
+   name: freebsd-roles

A roles/dnscrypt-proxy/defaults/main.yml => roles/dnscrypt-proxy/defaults/main.yml +45 -0
@@ 0,0 1,45 @@
+ ---
+ dnscrypt_proxy_addresses:
+   - "127.0.0.1:5353"
+ 
+ dnscrypt_proxy_cache: yes
+ 
+ dnscrypt_proxy_cache_size: 512
+ 
+ dnscrypt_proxy_cache_min_ttl: 600
+ 
+ dnscrypt_proxy_cache_max_ttl: 86400
+ 
+ dnscrypt_proxy_cache_neg_min_ttl: 60
+ 
+ dnscrypt_proxy_cache_neg_max_ttl: 600
+ 
+ dnscrypt_proxy_fallback_resolver: "1.1.1.1:53"
+ 
+ dnscrypt_proxy_logger: |
+   #!/usr/local/bin/execlineb -P
+   s6-log -b n10 s1000000 t !"gzip -nq9" /var/log/dnscrypt-proxy/
+ 
+ dnscrypt_proxy_max_clients: 100
+ 
+ dnscrypt_proxy_require_servers:
+   - dnssec
+   - nolog
+   - nofilter
+ 
+ dnscrypt_proxy_sources:
+   - name: public-resolvers
+     urls:
+       - https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v2/public-resolvers.md
+       - https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md
+     minisign_key: RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3
+     refresh_delay: 72
+     prefix: ''
+ 
+ dnscrypt_proxy_use_servers:
+   - ipv4
+   - dnscrypt
+   - doh
+ 
+ dnscrypt_proxy_upstreams:
+   - cloudflare

R ansible/roles/common/handlers/main.yml => roles/dnscrypt-proxy/handlers/main.yml +8 -4
@@ 1,8 1,12 @@ ---
- - name: restart s6
-   become: yes
-   service: name=s6 state=restarted
- 
  - name: rescan s6
    become: yes
    command: s6-svscanctl -an /var/service
+ 
+ - name: restart dnscrypt-proxy
+   become: yes
+   command: s6-svc -t /var/service/dnscrypt-proxy
+ 
+ - name: restart dnscrypt-proxy logger
+   become: yes
+   command: s6-svc -t /var/service/dnscrypt-proxy/log

R ansible/roles/dnscrypt-proxy/tasks/main.yml => roles/dnscrypt-proxy/tasks/main.yml +39 -10
@@ 1,22 1,35 @@ ---
- - import_tasks: deinstall.yml
+ ## Install
+ ##
  
  - name: install dnscrypt-proxy
    become: yes
-   pkgng: name=dnscrypt-proxy2 state=present
+   pkgng:
+     name: dnscrypt-proxy2
+     state: present
+   notify:
+     - restart dnscrypt-proxy
+ 
+ 
+ ## Configure
+ ##
  
  - name: configure dnscrypt-proxy
    become: yes
-   copy:
-     src: dnscrypt-proxy.toml
-     dest: /usr/local/etc/dnscrypt-proxy.toml
+   template:
+     src: dnscrypt-proxy.toml.j2
+     dest: /usr/local/etc/dnscrypt-proxy/dnscrypt-proxy.toml
    notify:
      - restart dnscrypt-proxy
  
- - name: ensure dnscrypt-proxy service directory
+ 
+ ## Supervise
+ ##
+ 
+ - name: ensure dnscrypt-proxy service directories
    become: yes
    file:
-     path: "{{item}}"
+     dest: "{{item}}"
      state: directory
    with_items:
      - /var/service/dnscrypt-proxy


@@ 25,17 38,33 @@ - name: install dnscrypt-proxy service
    become: yes
    copy:
-     src: run
      dest: /var/service/dnscrypt-proxy/run
      mode: 0555
+     content: |
+       #!/usr/local/bin/execlineb -P
+       fdmove -c 2 1
+       {% if dnscrypt_proxy_drop_privileges_early %}
+       s6-setuidgid _dnscrypt-proxy
+       {% endif %}
+       /usr/local/sbin/dnscrypt-proxy -config /usr/local/etc/dnscrypt-proxy/dnscrypt-proxy.toml
    notify:
      - rescan s6
+     - restart dnscrypt-proxy
  
- - name: install dnscrypt-proxy logging service
+ - name: install dnscrypt-proxy log service
    become: yes
    copy:
-     src: log
      dest: /var/service/dnscrypt-proxy/log/run
      mode: 0555
+     content: "{{dnscrypt_proxy_logger}}"
    notify:
      - rescan s6
+     - restart dnscrypt-proxy logger
+ 
+ 
+ ## Per-role flush
+ ##
+ 
+ - name: flush handler
+   become: yes
+   meta: flush_handlers

A roles/dnscrypt-proxy/templates/dnscrypt-proxy.toml.j2 => roles/dnscrypt-proxy/templates/dnscrypt-proxy.toml.j2 +56 -0
@@ 0,0 1,56 @@
+ {% if dnscrypt_proxy_upstreams %}
+ server_names = [{% for upstream in dnscrypt_proxy_upstreams %}"{{upstream}}"{% if not loop.last %}, {% endif %}{% endfor %}]
+ {% endif %}
+ listen_addresses = [{% for addr in dnscrypt_proxy_addresses %}"{{addr}}"{% if not loop.last %}, {% endif %}{% endfor %}]
+ max_clients = {{dnscrypt_proxy_max_clients}}
+ {% if not dnscrypt_proxy_drop_privileges_early %}
+ username = "_dnscrypt-proxy"
+ {% endif %}
+ 
+ {% if dnscrypt_proxy_use_servers %}
+ ## Require servers
+ ##
+ 
+ {% for server in dnscrypt_proxy_use_servers %}
+ {{server}}_servers = true
+ {% endfor %}
+ 
+ {% endif %}
+ {% if dnscrypt_proxy_require_servers %}
+ ## Require properties
+ ##
+ 
+ {% for require in dnscrypt_proxy_require_servers %}
+ require_{{require}} = true
+ {% endfor %}
+ 
+ {% endif %}
+ force_tcp = false
+ timeout = 2500
+ cert_refresh_delay = 240
+ fallback_resolver = "{{dnscrypt_proxy_fallback_resolver}}"
+ ignore_system_dns = true
+ block_ipv6 = false
+ 
+ {% if dnscrypt_proxy_cache %}
+ ## Cache
+ ##
+ 
+ cache = true
+ cache_size = {{dnscrypt_proxy_cache_size}}
+ cache_min_ttl = {{dnscrypt_proxy_cache_min_ttl}}
+ cache_max_ttl = {{dnscrypt_proxy_cache_max_ttl}}
+ cache_neg_min_ttl = {{dnscrypt_proxy_cache_neg_min_ttl}}
+ cache_neg_max_ttl = {{dnscrypt_proxy_cache_neg_max_ttl}}
+ 
+ {% endif %}
+ [sources]
+ {% for source in dnscrypt_proxy_sources %}
+ 
+   [sources.{{source.name}}]
+   urls = [{% for u in source.urls %}"{{u}}"{% if not loop.last %}, {% endif %}{% endfor %}]
+   cache_file = "{{source.name}}.md"
+   minisign_key = "{{source.minisign_key}}"
+   refresh_delay = {{source.refresh_delay}}
+   prefix = "{{source.prefix}}"
+ {% endfor %}

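For orientation, with the group_vars/all values above (IPv6-only listener, cache disabled, early privilege drop) plus the remaining role defaults, the rendered dnscrypt-proxy.toml begins roughly as follows; the [sources] section is then filled from dnscrypt_proxy_sources:

    server_names = ["cloudflare-ipv6"]
    listen_addresses = ["[::1]:5353"]
    max_clients = 100

    ipv6_servers = true
    dnscrypt_servers = true
    doh_servers = true

    require_dnssec = true
    require_nolog = true
    require_nofilter = true

    force_tcp = false
    timeout = 2500
    cert_refresh_delay = 240
    fallback_resolver = "[2606:4700:4700::1111]:53"
    ignore_system_dns = true
    block_ipv6 = false
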
A roles/doh-server/defaults/main.yml => roles/doh-server/defaults/main.yml +18 -0
@@ 0,0 1,18 @@
+ doh_server_addresses:
+   - 127.0.0.1:8053
+ 
+ doh_server_logger: |
+   #!/usr/local/bin/execlineb -P
+   s6-log -b n10 s1000000 t !"gzip -nq9" /var/log/doh-server/
+ 
+ doh_server_retries: 3
+ 
+ doh_server_tcp_only: false
+ 
+ doh_server_timeout: 10
+ 
+ doh_server_upstream:
+   - 1.1.1.1:53
+   - 1.0.0.1:53
+ 
+ doh_server_verbose: false

A roles/doh-server/handlers/main.yml => roles/doh-server/handlers/main.yml +12 -0
@@ 0,0 1,12 @@
+ ---
+ - name: rescan s6
+   become: yes
+   command: s6-svscanctl -an /var/service
+ 
+ - name: restart doh-server
+   become: yes
+   command: s6-svc -t /var/service/doh-server
+ 
+ - name: restart doh-server logger
+   become: yes
+   command: s6-svc -t /var/service/doh-server/log

A roles/doh-server/tasks/main.yml => roles/doh-server/tasks/main.yml +98 -0
@@ 0,0 1,98 @@
+ ---
+ ## Install
+ ##
+ 
+ - name: install
+   become: yes
+   pkgng:
+     name:
+       - git-lite
+       - gmake
+       - go
+     state: latest
+ 
+ 
+ ## Configure
+ ##
+ 
+ - name: ensure doh-server directory
+   become: yes
+   file:
+     path: /usr/local/etc/doh-server
+     state: directory
+     owner: nobody
+     group: wheel
+     mode: 0755
+ 
+ 
+ - name: configure doh-server
+   become: yes
+   template:
+     src: doh-server.conf.j2
+     dest: /usr/local/etc/doh-server/doh-server.conf
+   notify:
+     - restart doh-server
+ 
+ 
+ ## Setup
+ ##
+ 
+ - name: install doh-server
+   become: yes
+   shell: |
+     BUILD_BASE="$(mktemp -d)"
+     SRCDIR="$BUILD_BASE/src"
+     GOPATH="$BUILD_BASE/gopath"; export GOPATH
+     trap 'rm -rf $BUILD_BASE' 0 1 2 3 6 14 15
+ 
+     git clone https://github.com/m13253/dns-over-https.git "$SRCDIR"
+     cd "$SRCDIR" || exit 1
+     gmake || exit 1
+     install -m0755 doh-server/doh-server /usr/local/bin/doh-server
+   args:
+     creates: /usr/local/bin/doh-server
+ 
+ 
+ ## Supervise
+ ##
+ 
+ - name: ensure doh-server service directories
+   become: yes
+   file:
+     dest: "{{item}}"
+     state: directory
+   with_items:
+     - /var/service/doh-server
+     - /var/service/doh-server/log
+ 
+ - name: install doh-server service
+   become: yes
+   copy:
+     dest: /var/service/doh-server/run
+     mode: 0555
+     content: |
+       #!/usr/local/bin/execlineb -P
+       fdmove -c 2 1
+       s6-setuidgid nobody
+       /usr/local/bin/doh-server -conf /usr/local/etc/doh-server/doh-server.conf
+   notify:
+     - rescan s6
+     - restart doh-server
+ 
+ - name: install doh-server log service
+   become: yes
+   copy:
+     dest: /var/service/doh-server/log/run
+     mode: 0555
+     content: "{{doh_server_logger}}"
+   notify:
+     - rescan s6
+     - restart doh-server logger
+ 
+ 
+ ## Per-role flush
+ ##
+ 
+ - name: flush handler
+   become: yes
+   meta: flush_handlers

A roles/doh-server/templates/doh-server.conf.j2 => roles/doh-server/templates/doh-server.conf.j2 +20 -0
@@ 0,0 1,20 @@
+ listen = [
+ {% for address in doh_server_addresses %}
+   "{{address}}",
+ {% endfor %}
+ ]
+ 
+ cert = ""
+ key = ""
+ path = "/dns-query"
+ 
+ upstream = [
+ {% for upstream in doh_server_upstream %}
+   "{{upstream}}",
+ {% endfor %}
+ ]
+ 
+ timeout = {{doh_server_timeout}}
+ tries = {{doh_server_retries}}
+ tcp_only = {{"true" if doh_server_tcp_only else "false"}}
+ verbose = {{"true" if doh_server_verbose else "false"}}

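With the group_vars above pointing the listener at [::1]:8053 and the upstream at the local Unbound on [::1]:1053, this template renders to approximately:

    listen = [
      "[::1]:8053",
    ]

    cert = ""
    key = ""
    path = "/dns-query"

    upstream = [
      "[::1]:1053",
    ]

    timeout = 10
    tries = 3
    tcp_only = false
    verbose = false
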
A roles/unbound-blocklist/defaults/main.yml => roles/unbound-blocklist/defaults/main.yml +7 -0
@@ 0,0 1,7 @@
+ ---
+ unbound_blocklist_postcmds:
+   - s6-svc -t /var/service/unbound
+ 
+ unbound_blocklist_logger: |
+   #!/usr/local/bin/execlineb -P
+   s6-log -b n10 s1000000 t !"gzip -nq9" /var/log/unbound-blocklist/

A roles/unbound-blocklist/files/blocklist.sh => roles/unbound-blocklist/files/blocklist.sh +93 -0
@@ 0,0 1,93 @@
+ #!/bin/sh
+ 
+ ## Parsing args
+ ##
+ 
+ while [ "$#" -gt 0 ]; do
+     case $1 in
+         -b|--blocklist) blocklist="$2"; shift;;
+         -w|--workdir)   workdir="$2";   shift;;
+         *)
+             printf "%s: illegal option -- %s\\n" "$0" "$1" >&2
+             exit 1
+             ;;
+     esac
+     shift
+ done
+ 
+ 
+ ## Sanity check
+ ##
+ 
+ if [ ! -f "$blocklist" ]; then
+     printf "%s: expected --blocklist to be a path to a file.\\n" "$0" >&2
+     exit 1
+ fi
+ 
+ if [ ! "$workdir" ]; then
+     printf "%s: expected --workdir to be present.\\n" "$0" >&2
+     exit 1
+ fi
+ 
+ if [ -x "$(command -v curl)" ]; then
+     fetch_url() {
+         curl -sfL -o "$1" "$2"
+     }
+ elif [ -x "$(command -v fetch)" ]; then
+     fetch_url() {
+         fetch -qo "$1" "$2"
+     }
+ else
+     printf "%s: expected either fetch or curl to be installed.\\n" "$0" >&2
+     exit 1
+ fi
+ 
+ 
+ ## Main
+ ##
+ 
+ current_time=$(date +%s)
+ cache_age=259200
+ cache_dir="$workdir/cache"
+ tmpfile=$(mktemp)
+ 
+ trap 'rm -f $tmpfile' 0 1 2 3 6 14 15
+ mkdir -p "$cache_dir"
+ 
+ awk -F \# '$1 != "" { print $1 }' "$blocklist" | while read -r line; do
+     eval set -- "$line"
+     case "$1" in
+         hostfile)   _url="$3"; _prefix="$2";;
+         domainonly) _url="$2";;
+     esac
+ 
+     # Cache each host file for at least $cache_age so we can fall back to
+     # older data when a fetch fails, rather than losing every host from
+     # that blocklist.
+     _cache_name=$(printf "%s" "$_url" | tr -C "[a-zA-Z0-9]" "_")
+     _cache_file="$cache_dir/$_cache_name"
+     _fetch=1
+ 
+     if [ -f "$_cache_file" ]; then
+         _delta=$((current_time - $(date -r "$_cache_file" +%s)))
+         if [ $_delta -lt $cache_age ]; then
+             printf "%s: skipping %s (age %s < %s)\\n" "$0" "$_url" "$_delta" "$cache_age" >&2
+             _fetch=0
+         fi
+     fi
+ 
+     if [ "$_fetch" != "0" ] && ! fetch_url "$_cache_file" "$_url"; then
+         printf "%s: could not fetch %s, skipping\\n" "$0" "$_url" >&2
+     fi
+ 
+     # Normalize the data
+     if [ -f "$_cache_file" ]; then
+         case "$1" in
+             hostfile)   awk "\$1 == \"$_prefix\" { print tolower(\$2) }" < "$_cache_file";;
+             domainonly) awk -F \# '$1 != "" { print tolower($1) }' < "$_cache_file";;
+         esac | tr -d "\\r"
+     fi
+ done | sort -u | awk '{ print "local-zone: \""$1"\" static" }' > "$tmpfile"
+ 
+ mv "$tmpfile" "$workdir/blocklist.conf"
+ chmod 644 "$workdir/blocklist.conf"

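To illustrate the pipeline (the blocked hostnames below are only placeholders): each blocklist.txt entry names a source type, an address prefix to match for hostfile sources, and a URL; the script caches each source under $workdir/cache and emits one unbound local-zone directive per unique host:

    # blocklist.txt entry
    hostfile    0.0.0.0    https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts

    # resulting lines in $workdir/blocklist.conf
    local-zone: "ads.example.com" static
    local-zone: "tracker.example.net" static
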
A roles/unbound-blocklist/handlers/main.yml => roles/unbound-blocklist/handlers/main.yml +12 -0
@@ 0,0 1,12 @@
+ ---
+ - name: rescan s6
+   become: yes
+   command: s6-svscanctl -an /var/service
+ 
+ - name: restart unbound-blocklist
+   become: yes
+   command: s6-svc -t /var/service/unbound-blocklist
+ 
+ - name: restart unbound-blocklist logger
+   become: yes
+   command: s6-svc -t /var/service/unbound-blocklist/log

A roles/unbound-blocklist/tasks/main.yml => roles/unbound-blocklist/tasks/main.yml +124 -0
@@ 0,0 1,124 @@
+ ---
+ ## Install
+ ##
+ 
+ - name: install
+   become: yes
+   pkgng:
+     name:
+       - snooze
+     state: latest
+   notify:
+     - restart unbound-blocklist
+ 
+ 
+ ## Configure
+ ##
+ 
+ - name: ensure unbound-blocklist directory
+   become: yes
+   file:
+     path: /usr/local/etc/unbound-blocklist
+     state: directory
+     owner: nobody
+     group: wheel
+     mode: 0755
+ 
+ 
+ - name: copy unbound-blocklist configuration
+   become: yes
+   copy:
+     src: blocklist.txt
+     dest: /usr/local/etc/unbound-blocklist/blocklist.txt
+     owner: nobody
+     group: wheel
+     mode: 0600
+ 
+ 
+ ## Setup
+ ##
+ 
+ - name: ensure libexec directory
+   become: yes
+   file:
+     path: /usr/local/libexec
+     state: directory
+ 
+ - name: install unbound-blocklist
+   become: yes
+   copy:
+     src: blocklist.sh
+     dest: /usr/local/libexec/unbound-blocklist
+     owner: nobody
+     group: wheel
+     mode: 0755
+ 
+ - name: setup initial unbound-blocklist
+   become: yes
+   become_user: nobody
+   command: |
+     /usr/local/libexec/unbound-blocklist \
+       --blocklist /usr/local/etc/unbound-blocklist/blocklist.txt \
+       --workdir /usr/local/etc/unbound-blocklist
+   args:
+     creates: /usr/local/etc/unbound-blocklist/blocklist.conf
+ 
+ 
+ ## Supervise
+ ##
+ 
+ - name: ensure unbound-blocklist service directories
+   become: yes
+   file:
+     dest: "{{item}}"
+     state: directory
+   with_items:
+     - /var/service/unbound-blocklist
+     - /var/service/unbound-blocklist/log
+ 
+ - name: install unbound-blocklist service
+   become: yes
+   copy:
+     dest: /var/service/unbound-blocklist/run
+     mode: 0555
+     content: |
+       #!/usr/local/bin/execlineb -P
+       fdmove -c 2 1
+       snooze -d/7 -v -t /usr/local/etc/unbound-blocklist/timefile
+ 
+       if {
+         s6-setuidgid nobody
+         /usr/local/libexec/unbound-blocklist
+           --blocklist /usr/local/etc/unbound-blocklist/blocklist.txt
+           --workdir /usr/local/etc/unbound-blocklist
+       }
+       {% if unbound_blocklist_postcmds %}
+ 
+       {% for postcmd in unbound_blocklist_postcmds %}
+       foreground {
+         {{postcmd|trim|indent(2)}}
+       }
+       {% endfor %}
+       {% endif %}
+ 
+       touch /usr/local/etc/unbound-blocklist/timefile
+   notify:
+     - rescan s6
+     - restart unbound-blocklist
+ 
+ - name: install unbound-blocklist log service
+   become: yes
+   copy:
+     dest: /var/service/unbound-blocklist/log/run
+     mode: 0555
+     content: "{{unbound_blocklist_logger}}"
+   notify:
+     - rescan s6
+     - restart unbound-blocklist logger
+ 
+ 
+ ## Per-role flush
+ ##
+ 
+ - name: flush handler
+   become: yes
+   meta: flush_handlers

A roles/unbound/defaults/main.yml => roles/unbound/defaults/main.yml +61 -0
@@ 0,0 1,61 @@
+ unbound_access_control:
+   - 127.0.0.1/32 allow
+ 
+ unbound_addresses:
+   - 127.0.0.1
+ 
+ unbound_forward_zones:
+   - name: .
+     addresses:
+       - 1.1.1.1@53
+       - 1.0.0.1@53
+ 
+ unbound_do_not_query_localhost: yes
+ 
+ unbound_harden_below_nxdomain: yes
+ 
+ unbound_harden_dnssec_stripped: yes
+ 
+ unbound_harden_glue: yes
+ 
+ unbound_hide_identity: yes
+ 
+ unbound_hide_version: yes
+ 
+ unbound_includes:
+ 
+ unbound_infra_cache_min_rtt: 50
+ 
+ unbound_infra_cache_slabs: 2
+ 
+ unbound_interface_automatic: no
+ 
+ unbound_ip_transparent: no
+ 
+ unbound_key_cache_size: 4m
+ 
+ unbound_key_cache_slabs: 2
+ 
+ unbound_logger: |
+   #!/usr/local/bin/execlineb -P
+   s6-log -b n10 s1000000 t !"gzip -nq9" /var/log/unbound/
+ 
+ unbound_msg_cache_size: 1m
+ 
+ unbound_msg_cache_slabs: 2
+ 
+ unbound_neg_cache_size: 1m
+ 
+ unbound_num_threads: 2
+ 
+ unbound_port: 53
+ 
+ unbound_qname_minimisation: yes
+ 
+ unbound_rrset_cache_size: 4m
+ 
+ unbound_rrset_cache_slabs: 2
+ 
+ unbound_use_caps_for_id: yes
+ 
+ unbound_verbosity: 1

R ansible/roles/unbound/handlers/main.yml => roles/unbound/handlers/main.yml +8 -0
@@ 1,4 1,12 @@ ---
+ - name: rescan s6
+   become: yes
+   command: s6-svscanctl -an /var/service
+ 
  - name: restart unbound
    become: yes
    command: s6-svc -t /var/service/unbound
+ 
+ - name: restart unbound logger
+   become: yes
+   command: s6-svc -t /var/service/unbound/log

R ansible/roles/unbound/tasks/unbound.yml => roles/unbound/tasks/main.yml +38 -9
@@ 1,15 1,24 @@ ---
+ ## Install
+ ##
+ 
  - name: install unbound
    become: yes
-   pkgng: name=unbound state=present
+   pkgng:
+     name: unbound
+     state: present
+   notify:
+     - restart unbound
+ 
+ 
+ ## Configure
+ ##
  
  - name: configure unbound
    become: yes
-   copy:
-     src: unbound/unbound.conf
+   template:
+     src: unbound.conf.j2
      dest: /usr/local/etc/unbound/unbound.conf
-     owner: unbound
-     group: wheel
    notify:
      - restart unbound
  


@@ 38,10 47,14 @@ register: unbound_root_key
    failed_when: unbound_root_key.rc > 1
  
- - name: ensure unbound service directory
+ 
+ ## Supervise
+ ##
+ 
+ - name: ensure unbound service directories
    become: yes
    file:
-     path: "{{item}}"
+     dest: "{{item}}"
      state: directory
    with_items:
      - /var/service/unbound


@@ 50,17 63,33 @@ - name: install unbound service
    become: yes
    copy:
-     src: unbound/run
      dest: /var/service/unbound/run
      mode: 0555
+     content: |
+       #!/usr/local/bin/execlineb -P
+       fdmove -c 2 1
+       {% if unbound_drop_privileges_early %}
+       s6-setuidgid unbound
+       {% endif %}
+       /usr/local/sbin/unbound -d -c /usr/local/etc/unbound/unbound.conf
    notify:
      - rescan s6
+     - restart unbound
  
  - name: install unbound log service
    become: yes
    copy:
-     src: unbound/log
      dest: /var/service/unbound/log/run
      mode: 0555
+     content: "{{unbound_logger}}"
    notify:
      - rescan s6
+     - restart unbound logger
+ 
+ 
+ ## Per-role flush
+ ##
+ 
+ - name: flush handler
+   become: yes
+   meta: flush_handlers

A roles/unbound/templates/unbound.conf.j2 => roles/unbound/templates/unbound.conf.j2 +64 -0
@@ 0,0 1,64 @@
+ server:
+     verbosity: {{unbound_verbosity}}
+ 
+ {% for address in unbound_addresses %}
+     interface: {{address}}
+ {% endfor %}
+     port: {{unbound_port}}
+ {% if unbound_drop_privileges_early %}
+     username: ""
+     chroot: ""
+ {% else %}
+     username: unbound
+     chroot: /usr/local/etc/unbound
+ {% endif %}
+     use-syslog: no
+     logfile: ""
+ 
+     ip-transparent: {{"yes" if unbound_ip_transparent else "no"}}
+     interface-automatic: {{"yes" if unbound_interface_automatic else "no"}}
+ 
+ {% for access in unbound_access_control %}
+     access-control: {{access}}
+ {% endfor %}
+ 
+     num-threads: {{unbound_num_threads}}
+     msg-cache-slabs: {{unbound_msg_cache_slabs}}
+     rrset-cache-slabs: {{unbound_rrset_cache_slabs}}
+     infra-cache-slabs: {{unbound_infra_cache_slabs}}
+     key-cache-slabs: {{unbound_key_cache_slabs}}
+ 
+     msg-cache-size: {{unbound_msg_cache_size}}
+     rrset-cache-size: {{unbound_rrset_cache_size}}
+     key-cache-size: {{unbound_key_cache_size}}
+     neg-cache-size: {{unbound_neg_cache_size}}
+ 
+     infra-cache-min-rtt: {{unbound_infra_cache_min_rtt}}
+     qname-minimisation: {{"yes" if unbound_qname_minimisation else "no"}}
+     use-caps-for-id: {{"yes" if unbound_use_caps_for_id else "no"}}
+     hide-identity: {{"yes" if unbound_hide_identity else "no"}}
+     hide-version: {{"yes" if unbound_hide_version else "no"}}
+ 
+     auto-trust-anchor-file: /usr/local/etc/unbound/root.key
+     dlv-anchor-file: /usr/local/etc/unbound/dlv.isc.org.key
+     root-hints: /usr/local/etc/unbound/named.cache
+ 
+     harden-glue: {{"yes" if unbound_harden_glue else "no"}}
+     harden-dnssec-stripped: {{"yes" if unbound_harden_dnssec_stripped else "no"}}
+     harden-below-nxdomain: {{"yes" if unbound_harden_below_nxdomain else "no"}}
+ 
+     do-not-query-localhost: {{"yes" if unbound_do_not_query_localhost else "no"}}
+ {% if unbound_includes %}
+ 
+ {% for inc in unbound_includes %}
+     include: {{inc}}
+ {% endfor %}
+ {% endif %}
+ {% for zone in unbound_forward_zones %}
+ 
+ forward-zone:
+     name: {{zone.name}}
+     {% for addr in zone.addresses %}
+     forward-addr: {{addr}}
+     {% endfor %}
+ {% endfor %}

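With the group_vars above, the parts of the rendered unbound.conf that matter for this setup are the loopback listener on port 1053, the empty username/chroot (privileges are already dropped by s6-setuidgid in the run script), do-not-query-localhost disabled so the forward to dnscrypt-proxy works, the blocklist include, and the forward-zone itself. Excerpt of the rendered output:

    server:
        verbosity: 0
        interface: ::1
        port: 1053
        username: ""
        chroot: ""
        access-control: ::1/128 allow
        do-not-query-localhost: no
        include: /usr/local/etc/unbound-blocklist/blocklist.conf

    forward-zone:
        name: .
        forward-addr: ::1@5353
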
D terraform/do_instance.tf => terraform/do_instance.tf +0 -9
@@ 1,9 0,0 @@
- resource "digitalocean_droplet" "sg" {
-   image              = "freebsd-11-1-x64-zfs"
-   ipv6               = true
-   name               = "gridns-sg"
-   private_networking = true
-   region             = "sgp1"
-   size               = "s-1vcpu-1gb"
-   ssh_keys           = "${var.do_ssh_keys}"
- }

D terraform/do_network.tf => terraform/do_network.tf +0 -4
@@ 1,4 0,0 @@
- resource "digitalocean_floating_ip" "sg" {
-   droplet_id = "${digitalocean_droplet.sg.id}"
-   region     = "${digitalocean_droplet.sg.region}"
- }

D terraform/setup.tf => terraform/setup.tf +0 -11
@@ 1,11 0,0 @@
- # Note: will need to build the provider from source until 0.1.4 is released.
- # See here: https://github.com/terraform-providers/terraform-provider-digitalocean
- # Then copy the binary to .terraform.d/plugins/
- #
- #     $ cp $GOPATH/bin/terraform-provider-digitalocean \
- #         ~/.terraform.d/plugins/terraform-provider-digitalocean_v0.1.4-pre
- #
- provider "digitalocean" {
-   token       = "${var.do_token}"
-   version     = "~> 0.1.4-pre"
- }

D terraform/terraform.py => terraform/terraform.py +0 -497
@@ 1,497 0,0 @@
- #!/usr/bin/env python
- #
- # Copyright 2015 Cisco Systems, Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """\
- Dynamic inventory for Terraform - finds all `.tfstate` files below the working
- directory and generates an inventory based on them.
- """
- from __future__ import unicode_literals, print_function
- import argparse
- from collections import defaultdict
- from functools import wraps
- import json
- import os
- import re
- 
- VERSION = "0.3.0pre"
- 
- 
- def tfstates(root=None):
-     root = root or os.getcwd()
-     for dirpath, _, filenames in os.walk(root):
-         for name in filenames:
-             if os.path.splitext(name)[-1] == ".tfstate":
-                 yield os.path.join(dirpath, name)
- 
- 
- def iterresources(filenames):
-     for filename in filenames:
-         with open(filename, "r") as json_file:
-             state = json.load(json_file)
-             for module in state["modules"]:
-                 name = module["path"][-1]
-                 for key, resource in module["resources"].items():
-                     yield name, key, resource
- 
- 
- ## READ RESOURCES
- PARSERS = {}
- 
- 
- def _clean_dc(dcname):
-     # Consul DCs are strictly alphanumeric with underscores and hyphens -
-     # ensure that the consul_dc attribute meets these requirements.
-     return re.sub("[^\w_\-]", "-", dcname)
- 
- 
- def iterhosts(resources):
-     """yield host tuples of (name, attributes, groups)"""
-     for module_name, key, resource in resources:
-         resource_type, name = key.split(".", 1)
-         try:
-             parser = PARSERS[resource_type]
-         except KeyError:
-             continue
- 
-         yield parser(resource, module_name)
- 
- 
- def parses(prefix):
-     def inner(func):
-         PARSERS[prefix] = func
-         return func
- 
-     return inner
- 
- 
- def calculate_mantl_vars(func):
-     """calculate Mantl vars"""
- 
-     @wraps(func)
-     def inner(*args, **kwargs):
-         name, attrs, groups = func(*args, **kwargs)
- 
-         # attrs
-         if attrs.get("role", "") == "control":
-             attrs["consul_is_server"] = True
-         else:
-             attrs["consul_is_server"] = False
- 
-         # groups
-         if attrs.get("publicly_routable", False):
-             groups.append("publicly_routable")
- 
-         return name, attrs, groups
- 
-     return inner
- 
- 
- def _parse_prefix(source, prefix, sep="."):
-     for compkey, value in source.items():
-         try:
-             curprefix, rest = compkey.split(sep, 1)
-         except ValueError:
-             continue
- 
-         if curprefix != prefix or rest == "#":
-             continue
- 
-         yield rest, value
- 
- 
- def parse_attr_list(source, prefix, sep="."):
-     attrs = defaultdict(dict)
-     for compkey, value in _parse_prefix(source, prefix, sep):
-         idx, key = compkey.split(sep, 1)
-         attrs[idx][key] = value
- 
-     return attrs.values()
- 
- 
- def parse_dict(source, prefix, sep="."):
-     return dict(_parse_prefix(source, prefix, sep))
- 
- 
- def parse_list(source, prefix, sep="."):
-     return [value for _, value in _parse_prefix(source, prefix, sep)]
- 
- 
- def parse_bool(string_form):
-     token = string_form.lower()[0]
- 
-     if token == "t":
-         return True
-     elif token == "f":
-         return False
-     else:
-         raise ValueError("could not convert %r to a bool" % string_form)
- 
- 
- @parses("digitalocean_droplet")
- @calculate_mantl_vars
- def digitalocean_host(resource, tfvars=None):
-     raw_attrs = resource["primary"]["attributes"]
-     name = raw_attrs["name"]
-     groups = []
- 
-     default_user = "root"
-     default_python = "python"
- 
-     if raw_attrs["image"].find("freebsd") != -1:
-         default_user = "freebsd"
-         default_python = "/usr/local/bin/python3.6"
- 
-     attrs = {
-         "id": raw_attrs["id"],
-         "image": raw_attrs["image"],
-         "ipv4_address": raw_attrs["ipv4_address"],
-         "locked": parse_bool(raw_attrs["locked"]),
-         "metadata": json.loads(raw_attrs.get("user_data", "{}")),
-         "region": raw_attrs["region"],
-         "size": raw_attrs["size"],
-         "ssh_keys": parse_list(raw_attrs, "ssh_keys"),
-         "status": raw_attrs["status"],
-         "tags": parse_list(raw_attrs, "tags"),
-         # ansible
-         "ansible_ssh_host": raw_attrs["ipv4_address"],
-         "ansible_ssh_port": 22,
-         "ansible_ssh_user": default_user,
-         # generic
-         "public_ipv4": raw_attrs["ipv4_address"],
-         "private_ipv4": raw_attrs.get(
-             "ipv4_address_private", raw_attrs["ipv4_address"]
-         ),
-         "provider": "digitalocean",
-     }
- 
-     # attrs specific to Mantl
-     attrs.update(
-         {
-             "consul_dc": _clean_dc(attrs["metadata"].get("dc", attrs["region"])),
-             "role": attrs["metadata"].get("role", "none"),
-             "ansible_python_interpreter": attrs["metadata"].get(
-                 "python_bin", default_python
-             ),
-         }
-     )
- 
-     # add groups based on attrs
-     groups.append("do_image=" + attrs["image"])
-     groups.append("do_locked=%s" % attrs["locked"])
-     groups.append("do_region=" + attrs["region"])
-     groups.append("do_size=" + attrs["size"])
-     groups.append("do_status=" + attrs["status"])
-     groups.extend("do_tag=" + tag for tag in attrs["tags"])
-     groups.extend("do_metadata_%s=%s" % item for item in attrs["metadata"].items())
- 
-     # groups specific to Mantl
-     groups.append("role=" + attrs["role"])
-     groups.append("dc=" + attrs["consul_dc"])
- 
-     return name, attrs, groups
- 
- 
- @parses("aws_instance")
- @calculate_mantl_vars
- def aws_host(resource, module_name):
-     name = resource["primary"]["attributes"]["tags.Name"]
-     raw_attrs = resource["primary"]["attributes"]
- 
-     groups = []
- 
-     attrs = {
-         "ami": raw_attrs["ami"],
-         "availability_zone": raw_attrs["availability_zone"],
-         "ebs_block_device": parse_attr_list(raw_attrs, "ebs_block_device"),
-         "ebs_optimized": parse_bool(raw_attrs["ebs_optimized"]),
-         "ephemeral_block_device": parse_attr_list(raw_attrs, "ephemeral_block_device"),
-         "id": raw_attrs["id"],
-         "key_name": raw_attrs["key_name"],
-         "private": parse_dict(raw_attrs, "private", sep="_"),
-         "public": parse_dict(raw_attrs, "public", sep="_"),
-         "root_block_device": parse_attr_list(raw_attrs, "root_block_device"),
-         "security_groups": parse_list(raw_attrs, "security_groups"),
-         "subnet": parse_dict(raw_attrs, "subnet", sep="_"),
-         "tags": parse_dict(raw_attrs, "tags"),
-         "tenancy": raw_attrs["tenancy"],
-         "vpc_security_group_ids": parse_list(raw_attrs, "vpc_security_group_ids"),
-         # ansible-specific
-         "ansible_ssh_port": 22,
-         "ansible_ssh_host": raw_attrs["public_ip"],
-         # generic
-         "public_ipv4": raw_attrs["public_ip"],
-         "private_ipv4": raw_attrs["private_ip"],
-         "provider": "aws",
-     }
- 
-     # attrs specific to Ansible
-     if "tags.sshUser" in raw_attrs:
-         attrs["ansible_ssh_user"] = raw_attrs["tags.sshUser"]
-     if "tags.sshPrivateIp" in raw_attrs:
-         attrs["ansible_ssh_host"] = raw_attrs["private_ip"]
- 
-     # attrs specific to Mantl
-     attrs.update(
-         {
-             "consul_dc": _clean_dc(attrs["tags"].get("dc", module_name)),
-             "role": attrs["tags"].get("role", "none"),
-             "ansible_python_interpreter": attrs["tags"].get("python_bin", "python"),
-         }
-     )
- 
-     # groups specific to Mantl
-     groups.extend(
-         [
-             "aws_ami=" + attrs["ami"],
-             "aws_az=" + attrs["availability_zone"],
-             "aws_key_name=" + attrs["key_name"],
-             "aws_tenancy=" + attrs["tenancy"],
-         ]
-     )
-     groups.extend("aws_tag_%s=%s" % item for item in attrs["tags"].items())
-     groups.extend(
-         "aws_vpc_security_group=" + group for group in attrs["vpc_security_group_ids"]
-     )
-     groups.extend("aws_subnet_%s=%s" % subnet for subnet in attrs["subnet"].items())
- 
-     # groups specific to Mantl
-     groups.append("role=" + attrs["role"])
-     groups.append("dc=" + attrs["consul_dc"])
- 
-     return name, attrs, groups
- 
- 
- @parses("google_compute_instance")
- @calculate_mantl_vars
- def gce_host(resource, module_name):
-     name = resource["primary"]["id"]
-     raw_attrs = resource["primary"]["attributes"]
-     groups = []
- 
-     # network interfaces
-     interfaces = parse_attr_list(raw_attrs, "network_interface")
-     for interface in interfaces:
-         interface["access_config"] = parse_attr_list(interface, "access_config")
-         for key in interface.keys():
-             if "." in key:
-                 del interface[key]
- 
-     # general attrs
-     attrs = {
-         "can_ip_forward": raw_attrs["can_ip_forward"] == "true",
-         "disks": parse_attr_list(raw_attrs, "disk"),
-         "machine_type": raw_attrs["machine_type"],
-         "metadata": parse_dict(raw_attrs, "metadata"),
-         "network": parse_attr_list(raw_attrs, "network"),
-         "network_interface": interfaces,
-         "self_link": raw_attrs["self_link"],
-         "service_account": parse_attr_list(raw_attrs, "service_account"),
-         "tags": parse_list(raw_attrs, "tags"),
-         "zone": raw_attrs["zone"],
-         # ansible
-         "ansible_ssh_port": 22,
-         "provider": "gce",
-     }
- 
-     # attrs specific to Ansible
-     if "metadata.ssh_user" in raw_attrs:
-         attrs["ansible_ssh_user"] = raw_attrs["metadata.ssh_user"]
- 
-     # attrs specific to Mantl
-     attrs.update(
-         {
-             "consul_dc": _clean_dc(attrs["metadata"].get("dc", module_name)),
-             "role": attrs["metadata"].get("role", "none"),
-             "ansible_python_interpreter": attrs["metadata"].get("python_bin", "python"),
-         }
-     )
- 
-     try:
-         attrs.update(
-             {
-                 "ansible_ssh_host": interfaces[0]["access_config"][0]["nat_ip"]
-                 or interfaces[0]["access_config"][0]["assigned_nat_ip"],
-                 "public_ipv4": interfaces[0]["access_config"][0]["nat_ip"]
-                 or interfaces[0]["access_config"][0]["assigned_nat_ip"],
-                 "private_ipv4": interfaces[0]["address"],
-                 "publicly_routable": True,
-             }
-         )
-     except (KeyError, ValueError):
-         attrs.update({"ansible_ssh_host": "", "publicly_routable": False})
- 
-     # add groups based on attrs
-     groups.extend("gce_image=" + disk["image"] for disk in attrs["disks"])
-     groups.append("gce_machine_type=" + attrs["machine_type"])
-     groups.extend(
-         "gce_metadata_%s=%s" % (key, value)
-         for (key, value) in attrs["metadata"].items()
-         if key not in set(["sshKeys"])
-     )
-     groups.extend("gce_tag=" + tag for tag in attrs["tags"])
-     groups.append("gce_zone=" + attrs["zone"])
- 
-     if attrs["can_ip_forward"]:
-         groups.append("gce_ip_forward")
-     if attrs["publicly_routable"]:
-         groups.append("gce_publicly_routable")
- 
-     # groups specific to Mantl
-     groups.append("role=" + attrs["metadata"].get("role", "none"))
-     groups.append("dc=" + attrs["consul_dc"])
- 
-     return name, attrs, groups
- 
- 
- @parses("azure_instance")
- @calculate_mantl_vars
- def azure_host(resource, module_name):
-     name = resource["primary"]["attributes"]["name"]
-     raw_attrs = resource["primary"]["attributes"]
- 
-     groups = []
- 
-     attrs = {
-         "automatic_updates": raw_attrs["automatic_updates"],
-         "description": raw_attrs["description"],
-         "hosted_service_name": raw_attrs["hosted_service_name"],
-         "id": raw_attrs["id"],
-         "image": raw_attrs["image"],
-         "ip_address": raw_attrs["ip_address"],
-         "location": raw_attrs["location"],
-         "name": raw_attrs["name"],
-         "reverse_dns": raw_attrs["reverse_dns"],
-         "security_group": raw_attrs["security_group"],
-         "size": raw_attrs["size"],
-         "ssh_key_thumbprint": raw_attrs["ssh_key_thumbprint"],
-         "subnet": raw_attrs["subnet"],
-         "username": raw_attrs["username"],
-         "vip_address": raw_attrs["vip_address"],
-         "virtual_network": raw_attrs["virtual_network"],
-         "endpoint": parse_attr_list(raw_attrs, "endpoint"),
-         # ansible
-         "ansible_ssh_port": 22,
-         "ansible_ssh_user": raw_attrs["username"],
-         "ansible_ssh_host": raw_attrs["vip_address"],
-     }
- 
-     # attrs specific to mantl
-     attrs.update(
-         {
-             "consul_dc": attrs["location"].lower().replace(" ", "-"),
-             "role": attrs["description"],
-         }
-     )
- 
-     # groups specific to mantl
-     groups.extend(
-         [
-             "azure_image=" + attrs["image"],
-             "azure_location=" + attrs["location"].lower().replace(" ", "-"),
-             "azure_username=" + attrs["username"],
-             "azure_security_group=" + attrs["security_group"],
-         ]
-     )
- 
-     # groups specific to mantl
-     groups.append("role=" + attrs["role"])
-     groups.append("dc=" + attrs["consul_dc"])
- 
-     return name, attrs, groups
- 
- 
- ## QUERY TYPES
- def query_host(hosts, target):
-     for name, attrs, _ in hosts:
-         if name == target:
-             return attrs
- 
-     return {}
- 
- 
- def query_list(hosts):
-     groups = defaultdict(dict)
-     meta = {}
- 
-     for name, attrs, hostgroups in hosts:
-         for group in set(hostgroups):
-             groups[group].setdefault("hosts", [])
-             groups[group]["hosts"].append(name)
- 
-         meta[name] = attrs
- 
-     groups["_meta"] = {"hostvars": meta}
-     return groups
- 
- 
- def query_hostfile(hosts):
-     out = ["## begin hosts generated by terraform.py ##"]
-     out.extend(
-         "{}\t{}".format(attrs["ansible_ssh_host"].ljust(16), name)
-         for name, attrs, _ in hosts
-     )
- 
-     out.append("## end hosts generated by terraform.py ##")
-     return "\n".join(out)
- 
- 
- def main():
-     parser = argparse.ArgumentParser(
-         __file__, __doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
-     )
-     modes = parser.add_mutually_exclusive_group(required=True)
-     modes.add_argument("--list", action="store_true", help="list all variables")
-     modes.add_argument("--host", help="list variables for a single host")
-     modes.add_argument("--version", action="store_true", help="print version and exit")
-     modes.add_argument(
-         "--hostfile", action="store_true", help="print hosts as a /etc/hosts snippet"
-     )
-     parser.add_argument(
-         "--pretty", action="store_true", help="pretty-print output JSON"
-     )
-     parser.add_argument(
-         "--nometa", action="store_true", help="with --list, exclude hostvars"
-     )
-     default_root = os.environ.get(
-         "TERRAFORM_STATE_ROOT", os.path.abspath(os.path.join(os.path.dirname(__file__)))
-     )
-     parser.add_argument(
-         "--root", default=default_root, help="custom root to search for `.tfstate`s in"
-     )
- 
-     args = parser.parse_args()
- 
-     if args.version:
-         print("%s %s" % (__file__, VERSION))
-         parser.exit()
- 
-     hosts = iterhosts(iterresources(tfstates(args.root)))
-     if args.list:
-         output = query_list(hosts)
-         if args.nometa:
-             del output["_meta"]
-         print(json.dumps(output, indent=4 if args.pretty else None))
-     elif args.host:
-         output = query_host(hosts, args.host)
-         print(json.dumps(output, indent=4 if args.pretty else None))
-     elif args.hostfile:
-         output = query_hostfile(hosts)
-         print(output)
- 
-     parser.exit()
- 
- 
- if __name__ == "__main__":
-     main()

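The helpers above (_parse_prefix, parse_attr_list, parse_dict, parse_list) exist because Terraform 0.11-era state files flatten collections into dotted keys such as ssh_keys.# and ssh_keys.0 (visible in the droplet attributes in the state file below). A minimal sketch of that expansion, mirroring parse_list, with an abbreviated sample dict whose values are copied from the state recorded in this repository:

    # Expand Terraform's flattened "prefix.N" attribute keys back into a
    # Python list, skipping the "prefix.#" entry that only stores the count.
    def parse_list(source, prefix, sep="."):
        values = []
        for compkey, value in source.items():
            try:
                curprefix, rest = compkey.split(sep, 1)
            except ValueError:
                continue  # key has no separator, e.g. "name"
            if curprefix != prefix or rest == "#":
                continue
            values.append(value)
        return values

    raw_attrs = {"ssh_keys.#": "1", "ssh_keys.0": "10049463", "name": "gridns-sg"}
    print(parse_list(raw_attrs, "ssh_keys"))  # ['10049463']
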
D terraform/terraform.tfstate => terraform/terraform.tfstate +0 -70
@@ 1,70 0,0 @@-{
-     "version": 3,
-     "terraform_version": "0.11.8",
-     "serial": 3,
-     "lineage": "f577dce2-5a1c-a1b1-68e0-2ba93f02e833",
-     "modules": [
-         {
-             "path": [
-                 "root"
-             ],
-             "outputs": {},
-             "resources": {
-                 "digitalocean_droplet.sg": {
-                     "type": "digitalocean_droplet",
-                     "depends_on": [],
-                     "primary": {
-                         "id": "100050509",
-                         "attributes": {
-                             "disk": "25",
-                             "id": "100050509",
-                             "image": "freebsd-11-1-x64-zfs",
-                             "ipv4_address": "178.128.104.170",
-                             "ipv4_address_private": "10.130.7.232",
-                             "ipv6": "true",
-                             "ipv6_address": "2400:6180:0000:00d1:0000:0000:04b2:d001",
-                             "ipv6_address_private": "",
-                             "locked": "false",
-                             "name": "gridns-sg",
-                             "price_hourly": "0.00744",
-                             "price_monthly": "5",
-                             "private_networking": "true",
-                             "region": "sgp1",
-                             "resize_disk": "true",
-                             "size": "s-1vcpu-1gb",
-                             "ssh_keys.#": "1",
-                             "ssh_keys.0": "10049463",
-                             "status": "active",
-                             "tags.#": "0",
-                             "vcpus": "1"
-                         },
-                         "meta": {},
-                         "tainted": false
-                     },
-                     "deposed": [],
-                     "provider": "provider.digitalocean"
-                 },
-                 "digitalocean_floating_ip.sg": {
-                     "type": "digitalocean_floating_ip",
-                     "depends_on": [
-                         "digitalocean_droplet.sg"
-                     ],
-                     "primary": {
-                         "id": "167.99.31.69",
-                         "attributes": {
-                             "droplet_id": "100050509",
-                             "id": "167.99.31.69",
-                             "ip_address": "167.99.31.69",
-                             "region": "sgp1"
-                         },
-                         "meta": {},
-                         "tainted": false
-                     },
-                     "deposed": [],
-                     "provider": "provider.digitalocean"
-                 }
-             },
-             "depends_on": []
-         }
-     ]
- }

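For context, the droplet recorded above is what the digitalocean_host parser in the deleted terraform.py would have turned into an inventory entry: hostvars keyed on the name gridns-sg plus do_* groups derived from the attributes. A rough sketch of that group derivation (the real parser also adds tag, metadata, role, and dc groups), with the attribute values copied from the state above:

    # Derive the do_* inventory groups the deleted parser built for this
    # droplet; attribute values are taken from the state file above.
    attrs = {
        "image": "freebsd-11-1-x64-zfs",
        "region": "sgp1",
        "size": "s-1vcpu-1gb",
        "status": "active",
    }
    groups = ["do_%s=%s" % item for item in attrs.items()]
    print(groups)
    # ['do_image=freebsd-11-1-x64-zfs', 'do_region=sgp1',
    #  'do_size=s-1vcpu-1gb', 'do_status=active']
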
D terraform/terraform.tfvars => terraform/terraform.tfvars +0 -1
@@ 1,1 0,0 @@-do_ssh_keys = [10049463]

D terraform/variables.tf => terraform/variables.tf +0 -8
@@ 1,8 0,0 @@-variable "do_token" {
-   description = "DigitalOcean access token"
- }
- 
- variable "do_ssh_keys" {
-   default     = []
-   description = "DigitalOcean SSH keys"
- }