~mna/tulip-cli

c1dd61c31b3897d1796c14394d45d525e9810d02 — Martin Angers 1 year, 10 months ago
Initial commit
A  => .gitignore +14 -0
@@ 1,14 @@
# environment files
.env*

# locally-installed lua modules
/lua_modules/

# local luarocks configuration
/.luarocks/

# output files generated by tools
*.out

# rocks packages
*.src.rock

A  => .luacheckrc +3 -0
@@ 1,3 @@
std = 'lua53'
files['test/main.lua'].allow_defined_top = true
files['test/main.lua'].global = false

A  => .luacov +3 -0
@@ 1,3 @@
include = {
  'src%/.+$'
}

A  => exp/cmds/deploy/code_script.lua +45 -0
@@ 1,45 @@
-- Shebang and strict-mode prelude for the generated deploy script.
local function bash()
  local prelude = [[#!/usr/bin/env bash

set -euo pipefail
]]
  return prelude
end

-- Shell commands that stop the application and database services
-- before the new code is installed.
local function stop()
  local section = [[
# stop both the DB and app services
systemctl stop app
systemctl stop postgresql
]]
  return section
end

-- Shell commands that back up any existing /opt/app and install the
-- release archive for `tag` from the tulip repository into /opt/app.
local function install(tag)
  local template = [[
if [ -d "/opt/app" ]; then
  rm -rf /opt/app.bak
  mv -T /opt/app /opt/app.bak
fi

mkdir -p /opt/app
curl -o /tmp/%s.tar.gz "https://git.sr.ht/~mna/tulip/archive/%s.tar.gz"
cd /tmp
tar --strip-components=1 --directory /opt/app -xzf %s.tar.gz
rm -f /tmp/%s.tar.gz
]]
  -- the tag appears in the temp file name, the download URL and the
  -- extraction command, hence the repeated argument.
  return template:format(tag, tag, tag, tag)
end

-- Shell commands that install the app's Lua dependencies from its
-- rockspec file.
local function luadeps()
  local section = [[
cd /opt/app
luarocks install --only-deps *.rockspec
]]
  return section
end

-- Builds the complete code-deploy script for a git tag: bash prelude,
-- service stop, install of the tagged release, then lua dependencies.
return function(tag)
  local sections = {
    bash(),
    stop(),
    install(tag),
    luadeps(),
  }
  return table.concat(sections)
end

A  => exp/cmds/deploy/image_script.lua +224 -0
@@ 1,224 @@
-- Shebang, strict-mode prelude and start marker for the cloud-init
-- user-data script.
local function bash()
  local prelude = [[#!/usr/bin/env bash

# This is the cloud-init user data script used to setup a new image
# in a consistent way.

set -euo pipefail
echo '>>> image userdata start'
]]
  return prelude
end

-- dnf section: upgrades every installed package, then installs the
-- build and runtime dependencies of the stack.
local function dnf()
  local section = [[
echo '>>> dnf'

# upgrade all packages
dnf --assumeyes upgrade

# install required packages
dnf --assumeyes install   \
  certbot                 \
  fail2ban                \
  gcc                     \
  git                     \
  libpq-devel             \
  lsof                    \
  lua-devel               \
  luarocks                \
  m4                      \
  make                    \
  openssl-devel           \
  postgresql              \
  postgresql-server       \
  postgresql-server-devel \
  redhat-rpm-config       \
  sendmail                \
  the_silver_searcher     \
  vim
]]
  return section
end

-- secrets section: generates the postgres root password, the pgpass
-- file and the CSRF key under /opt/secrets, with restrictive perms.
local function secrets()
  local section = [[
echo '>>> secrets'

mkdir -p /opt/secrets

# postgresql root password
openssl rand -base64 32 | tr '+/=' '._-' > /opt/secrets/pgroot_pwd
chown postgres:postgres /opt/secrets/pgroot_pwd
chmod 0600 /opt/secrets/pgroot_pwd

# postgresql pgpass file
echo -n 'localhost:*:*:postgres:' > /opt/secrets/pgpass
cat /opt/secrets/pgroot_pwd >> /opt/secrets/pgpass
chown postgres:postgres /opt/secrets/pgpass
chmod 0600 /opt/secrets/pgpass

# CSRF secret key
openssl rand -base64 32 | tr '+/=' '._-' > /opt/secrets/csrf_key
chmod 0600 /opt/secrets/csrf_key
]]
  return section
end

-- firewalld section: enables the firewall and opens http/https, both
-- for the running config and permanently.
local function firewalld()
  local section = [[
echo '>>> firewalld'

# configure firewalld
systemctl enable firewalld --now
firewall-cmd --zone=public --add-service=http --add-service=https
firewall-cmd --zone=public --add-service=http --add-service=https --permanent
]]
  return section
end

-- fail2ban section: logs to stdout, enables the sshd jail with an
-- email-sending action, and enables the sendmail/fail2ban services.
local function fail2ban()
  local section = [[
echo '>>> fail2ban'

# configure fail2ban
cp /etc/fail2ban/fail2ban.conf /etc/fail2ban/fail2ban.local
sed -i 's/^logtarget =.*/logtarget = sysout/g' /etc/fail2ban/fail2ban.local

cat > /etc/fail2ban/jail.local <<EOF
[DEFAULT]
ignoreip = 127.0.0.1/8 ::1
# NOTE: setting action that sends email, but sendmail won't work without
# a valid MX and hostname.
action = %(action_mwl)s

[sshd]
enabled = true
EOF

# enabling sendmail, even though it won't work without valid MX/hostname.
systemctl enable sendmail
systemctl enable fail2ban

# TODO: custom log scanning for app abuse?
]]
  return section
end

-- postgresql section: builds pg_cron from source, initializes the
-- cluster with scram auth and the generated root password, preloads
-- pg_cron, and creates its extension.
local function postgres()
  local section = [[
echo '>>> postgresql'

# build and install pg_cron
pushd /tmp
git clone https://github.com/citusdata/pg_cron.git
cd pg_cron
make && make install
popd

# initialize the database and start the service
PGSETUP_INITDB_OPTIONS='--auth=scram-sha-256 --locale=en_US.UTF8 --encoding=UTF8 --pwfile=/opt/secrets/pgroot_pwd' \
  postgresql-setup --initdb --unit postgresql

cat >> /var/lib/pgsql/data/postgresql.conf <<EOF
shared_preload_libraries = 'pg_cron'
cron.database_name = 'postgres'
EOF

systemctl enable --now postgresql

PGPASSFILE=/opt/secrets/pgpass \
  psql --username postgres     \
       --command 'CREATE EXTENSION pg_cron;'
]]
  return section
end

-- lua dependencies section: installs rocks needed before the app's
-- rockspec can run, plus the rocks used by the dummy test app.
local function luadeps()
  local section = [[
echo '>>> lua dependencies'

# install pre-required Lua dependencies (not handled by the rockspec
# application file).
luarocks install luarocks-fetch-gitrec
luarocks install luaossl 'CFLAGS=-DHAVE_EVP_KDF_CTX=1 -fPIC'

# install dependencies required by the dummy app to test connection
# to postgres and sleep.
luarocks install cqueues-pgsql
luarocks install xpgsql
]]
  return section
end

-- application service section: writes a dummy Lua app that pings
-- postgres in a loop, installs its systemd unit, and starts it.
local function service()
  local section = [[
echo '>>> application service'

# install a dummy lua app just to be able to test the setup,
# until an actual app is deployed.
mkdir -p /opt/app/scripts

cat > /opt/app/scripts/run <<EOF
#!/usr/bin/env lua

local cqueues = require 'cqueues'
local xpgsql = require 'xpgsql'

local cq = cqueues.new()
cq:wrap(function()
  local conn = assert(xpgsql.connect())
  while true do
    local res = assert(conn:query('SELECT 1'))
    assert(res[1][1] == '1')
    cqueues.sleep(10)
  end
end)
assert(cq:loop())
EOF

chmod +x /opt/app/scripts/run

cat > /etc/systemd/system/app.service <<EOF
[Unit]
Description=The Application service

Requires=postgresql.service
After=network.target

Conflicts=certbot.service
Before=certbot.service

[Service]
Type=exec
ExecStart=/opt/app/scripts/run
Restart=always

WorkingDirectory=/opt/app

Environment=PGPASSFILE=/opt/secrets/pgpass
Environment=PGHOST=localhost
Environment=PGPORT=5432
Environment=PGCONNECT_TIMEOUT=10
Environment=PGUSER=postgres
Environment=PGDATABASE=postgres
Environment=TULIP_CSRFKEY=`cat /opt/secrets/csrf_key`
Environment=LUA_PATH='/usr/share/lua/5.3/?.lua;/usr/share/lua/5.3/?/init.lua;/usr/lib64/lua/5.3/?.lua;/usr/lib64/lua/5.3/?/init.lua;./?.lua;./?/init.lua'
Environment=LUA_CPATH='/usr/lib/lua/5.3/?.so;/usr/lib64/lua/5.3/?.so;/usr/lib64/lua/5.3/loadall.so;./?.so'

[Install]
WantedBy=multi-user.target
EOF

systemctl enable --now app
]]
  return section
end

-- The complete user-data script is the concatenation of every section,
-- ending with a reboot so all configuration takes effect.
-- TODO: create a user for the app, will not run as root, and make sure secrets
-- and other files are readable by this user.
-- TODO: create a DB user for the app (with secret pwd) and add it to pgpass.
local sections = {
  bash(),
  dnf(),
  secrets(),
  firewalld(),
  -- TODO: configure certbot
  fail2ban(),
  postgres(),
  luadeps(),
  service(),
  '\nreboot\n',
}
return table.concat(sections)

A  => exp/cmds/deploy/init.lua +418 -0
@@ 1,418 @@
local codesh = require 'scripts.cmds.deploy.code_script'
local fn = require 'fn'
local imgsh = require 'scripts.cmds.deploy.image_script'
local sh = require 'shell'
local svcsh = require 'scripts.cmds.deploy.svc_script'
local unistd = require 'posix.unistd'

-- Formats and writes a progress message to stdout. Flushes explicitly
-- when the message does not end with a newline, so partial lines such
-- as '> doing x...' appear immediately.
local function log(fmt, ...)
  local msg = string.format(fmt, ...)
  io.write(msg)
  local ends_with_newline = string.sub(msg, -1) == '\n'
  if not ends_with_newline then
    io.flush()
  end
end

-- Looks up `domain` (e.g. sub.example.com) on the provider and returns a
-- table with fields: domain, subdomain, maindomain, plus - when present -
-- an A and/or AAAA entry ({id = record-id, ip = address}) for the
-- subdomain. Returns nil when the main domain is not registered.
local function get_domain(domain)
  -- assume the part before the first dot is the subdomain
  local sub, main = string.match(domain, '^([^%.]+)%.(.+)$')

  -- check if the domain exists
  -- NOTE(review): the grep pattern is a regex, so the dots in the domain
  -- match any character; an exact-match false positive is unlikely but
  -- possible - consider grep -F or escaping.
  log('> get domain %s...', main)
  local ok = (sh.cmd('doctl', 'compute', 'domain', 'list', '--no-header', '--format', 'Domain') |
    sh.cmd('grep', string.format('^%s$', main))):exec()
  if not ok then
    return
  end

  local out = assert(sh.cmd(
    'doctl', 'compute', 'domain', 'records', 'list',
    main, '--no-header', '--format', 'ID,Type,Data,Name'
  ):output())

  local o = {domain = domain, subdomain = sub, maindomain = main}
  -- %f[^%s\0] is a frontier pattern anchoring each match at a boundary
  -- from whitespace (or string start) to non-whitespace, i.e. at the
  -- start of each whitespace-separated record row/field group.
  for id, typ, data, name in string.gmatch(out, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)%s+(%S+)') do
    if name == sub and (typ == 'A' or typ == 'AAAA') then
      o[typ] = {id = id, ip = data}
    end
  end
  log(' ok\n')
  return o
end

-- assign the domain to this node
-- Points the sub-domain's A record at node.ip4 with a 120s TTL,
-- creating the record when none exists yet.
local function set_domain(dom_obj, node)
  -- if the domain is already mapped to an ip address, update it
  -- NOTE(review): this picks the first A record of the whole main
  -- domain, not necessarily the sub-domain's record (dom_obj.A.id,
  -- when set, already holds that id) - confirm behavior when the main
  -- domain has multiple A records.
  local out = assert(sh.cmd('doctl', 'compute', 'domain', 'records', 'list',
    dom_obj.maindomain, '--format', 'ID,Type', '--no-header'):output())
  local rec_id = string.match(out, '%f[^%s\0](%S+)%s+A')
  if rec_id then
    log('> update domain A record of %s to %s...', dom_obj.domain, node.ip4)
    assert(sh.cmd('doctl', 'compute', 'domain', 'records', 'update',
      dom_obj.maindomain, '--record-name', dom_obj.subdomain,
      '--record-id', rec_id, '--record-ttl', 120, '--record-type', 'A',
      '--record-data', node.ip4):output())
  else
    log('> create domain A record of %s to %s...', dom_obj.domain, node.ip4)
    assert(sh.cmd('doctl', 'compute', 'domain', 'records', 'create',
      dom_obj.maindomain, '--record-name', dom_obj.subdomain,
      '--record-ttl', 120, '--record-type', 'A',
      '--record-data', node.ip4):output())
  end
  log(' ok\n')
end

-- Assigns the droplet to the DigitalOcean project with the given name.
-- Raises when no project with that name exists.
local function set_project(project, node)
  log('> assign node to project %s...', project)

  local listing = assert(sh.cmd('doctl', 'projects', 'list',
    '--format', 'ID,Name', '--no-header'):output())

  -- scan the ID/Name columns for the requested project name
  local project_id
  for id, name in string.gmatch(listing, '%f[^%s\0](%S+)%s+(%S+)') do
    if name == project then
      project_id = id
      break
    end
  end
  assert(project_id, 'could not find project')

  assert(sh.cmd('doctl', 'projects', 'resources', 'assign',
    project_id, '--resource', 'do:droplet:' .. node.id):output())

  log(' ok\n')
end

-- Adds the droplet to the DigitalOcean firewall with the given name.
-- Raises when no firewall with that name exists.
local function set_firewall(firewall, node)
  log('> assign firewall %s to node...', firewall)

  local listing = assert(sh.cmd('doctl', 'compute', 'firewall', 'list',
    '--format', 'ID,Name', '--no-header'):output())

  -- scan the ID/Name columns for the requested firewall name
  local firewall_id
  for id, name in string.gmatch(listing, '%f[^%s\0](%S+)%s+(%S+)') do
    if name == firewall then
      firewall_id = id
      break
    end
  end
  assert(firewall_id, 'could not find firewall')

  assert(sh.cmd('doctl', 'compute', 'firewall', 'add-droplets',
    firewall_id, '--droplet-ids', node.id):output())

  log(' ok\n')
end

-- Resolves a comma-separated list of ssh key names to an array of
-- {id, name, fingerprint} tables as registered with the provider.
-- Names that do not exist on the provider are silently skipped.
local function get_ssh_keys(list)
  local wanted = {}
  for name in string.gmatch(list, '([^,]+)') do
    wanted[name] = true
  end

  log('> get ssh key id(s)...')
  local listing = assert(sh.cmd(
    'doctl', 'compute', 'ssh-key', 'list',
    '--no-header', '--format', 'ID,Name,FingerPrint'
  ):output())

  local keys = {}
  for id, name, fp in string.gmatch(listing, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)') do
    if wanted[name] then
      keys[#keys + 1] = {id = id, name = name, fingerprint = fp}
    end
  end
  log(' ok\n')
  return keys
end

-- creates a node to configure a new image, takes a snapshot of the node then
-- destroys it and returns the id of the snapshot, ready to use in a node creation.
-- The operator is prompted to confirm that cloud-init completed before
-- the snapshot is taken. May mutate opts.key_ids (resolved key ids).
local function create_image(dom_obj, region, opts)
  -- fixed size and base OS for the throwaway node used to bake the image
  local SIZE = 's-1vcpu-1gb'
  local BASE_IMAGE = 'fedora-32-x64'

  if opts.ssh_keys and not opts.key_ids then
    -- ssh key ids need to be comma-separated
    local keys = get_ssh_keys(opts.ssh_keys)
    opts.key_ids = table.concat(fn.reduce(function(cumul, _, v)
      table.insert(cumul, v.id)
      return cumul
    end, {}, ipairs(keys)), ',')
  end
  if (not opts.key_ids) or (opts.key_ids == '') then
    error('at least one ssh key must be provided to prevent password-based login')
  end

  local tags
  if opts.tags then
    tags = opts.tags -- tags need to be comma-separated
  end

  -- DigitalOcean doesn't prevent creation of nodes with the same name.
  -- Add the current epoch to help make it unique.
  local name = string.gsub(dom_obj.domain, '%.', '-') .. '.base.' .. os.time()
  -- imgsh is the full cloud-init user-data script (a string) that
  -- configures the node on first boot.
  local args = {
    'doctl', 'compute', 'droplet', 'create', name,
    '--image', BASE_IMAGE, '--region', region, '--ssh-keys', opts.key_ids,
    '--size', SIZE, '--user-data', imgsh, '--wait',
  }
  if tags then
    table.insert(args, '--tag-names')
    table.insert(args, tags)
  end
  log('> create base image node %s...', name)
  assert(sh.cmd(table.unpack(args)):output())
  log(' ok\n')

  -- get this droplet's id
  log('> get base image node id of %s...', name)
  local out = sh.cmd('doctl', 'compute', 'droplet', 'list', '--format', 'ID,Name,Public IPv4', '--no-header'):output()
  local base_id, base_ip
  for id, nm, ip in string.gmatch(out, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)') do
    if nm == name then
      base_id = id
      base_ip = ip
      break
    end
  end
  assert(base_id, 'could not find base node used to create image')
  log(' ok\n')

  -- block until the operator confirms that cloud-init finished
  io.write(string.format(
    [[
> base node for %s is being configured...

  You may inspect its progress by running:
    $ doctl compute ssh %s
  and follow the configuration progress by running:
    $ journalctl -fu cloud-final

  Note that it will reboot after configuration, you should check that
  after the reboot everything is running correctly, e.g. by running:
    $ systemctl status

  You should extract the generated secrets locally and store them
  securely:
    $ mkdir -p ./run/secrets/%s
    $ scp root@%s:/opt/secrets/* ./run/secrets/%s/

Press ENTER when ready to continue.
]], name, base_id, name, base_ip, name))
  io.read('l')

  -- shutdown, snapshot, then destroy the temporary node; any failure
  -- leaves the node behind, so the error tells the operator to clean up.
  log('> shutdown base image node %s...', name)
  if not sh.cmd('doctl', 'compute', 'droplet-action', 'shutdown', base_id, '--wait'):output() then
    error(string.format('failed to shutdown base image node %s (id=%s), delete it manually', name, base_id))
  end
  log(' ok\n')
  log('> create snapshot %s of base image node...', name)
  if not sh.cmd('doctl', 'compute', 'droplet-action', 'snapshot',
      base_id, '--snapshot-name', name, '--wait'):output() then
    error(string.format('failed to create snapshot image of node %s (id=%s), delete it manually', name, base_id))
  end
  log(' ok\n')
  log('> destroy base image node %s...', name)
  if not sh.cmd('doctl', 'compute', 'droplet', 'delete', base_id, '--force'):output() then
    error(string.format('failed to destroy base image node %s (id=%s), delete it manually', name, base_id))
  end
  log(' ok\n')

  -- the snapshot shares the node's name; look up its image id
  log('> get snapshot id of %s...', name)
  local snapshot_id
  out = sh.cmd('doctl', 'compute', 'image', 'list', '--format', 'ID,Name', '--no-header'):output()
  for id, nm in string.gmatch(out, '%f[^%s\0](%S+)%s+(%S+)') do
    if nm == name then
      snapshot_id = id
      break
    end
  end
  assert(snapshot_id, 'could not find snapshot')
  log(' ok\n')

  return snapshot_id
end

-- returns the image ID of the image corresponding to the provided name/slug.
-- Raises if the image does not exist; asks the operator for confirmation
-- before returning a public image (unlikely to be secured for deployment).
local function get_image(image)
  -- validate that the image is valid and warn if it is a public
  -- one (unlikely to be secured and ready to run the app)
  log('> get image %s...', image)
  -- the listing includes public images; grep for the name surrounded by
  -- whitespace to avoid partial matches. Default to '' when the
  -- pipeline produces nothing so the match below fails cleanly.
  local out = (
    sh.cmd('doctl', 'compute', 'image', 'list', '--format', 'ID,Name,Public', '--public') |
    sh.cmd('grep', '--fixed-strings', ' ' .. image .. ' ')):output() or ''
  local id, _, pub = string.match(out, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)')
  if not id then
    error('image does not exist')
  elseif pub ~= 'false' then
    io.write(string.format(
      'image %s is public, it is probably not secure nor fitting to deploy on this, continue anyway? [y/N]',
      image))
    -- guard against EOF on stdin (io.read returns nil): treat as "no"
    local res = io.read('l')
    if not res or not string.match(res, '^%s*[yY]') then
      error('canceled by user')
    end
  end
  log(' ok\n')
  return id
end

-- Creates a new droplet for the domain from opts.create, which must be
-- REGION:SIZE or REGION:SIZE:IMAGE. When no image part is given, a new
-- base image is built via create_image. Returns {id, name, ip4}.
local function create_node(dom_obj, opts)
  local parts = {}
  for s in string.gmatch(opts.create, '([^:]+)') do
    table.insert(parts, s)
  end
  if #parts < 2 or #parts > 3 then
    error('invalid --create value, want REGION:SIZE or REGION:SIZE:IMAGE')
  end

  local region, size, image = table.unpack(parts)
  local image_id
  if not image then
    image_id = create_image(dom_obj, region, opts)
  else
    image_id = get_image(image)
  end
  assert(image_id, 'could not get image id')

  -- ssh key(s) is required, otherwise the droplet is created with
  -- a root password, insecure.
  -- NOTE(review): this block duplicates the resolution in create_image;
  -- when create_image ran above, opts.key_ids is already set and this
  -- is a no-op.
  if opts.ssh_keys and not opts.key_ids then
    -- ssh key ids need to be comma-separated
    local keys = get_ssh_keys(opts.ssh_keys)
    opts.key_ids = table.concat(fn.reduce(function(cumul, _, v)
      table.insert(cumul, v.id)
      return cumul
    end, {}, ipairs(keys)), ',')
  end
  if (not opts.key_ids) or (opts.key_ids == '') then
    error('at least one ssh key must be provided to prevent password-based login')
  end

  local tags
  if opts.tags then
    tags = opts.tags -- tags need to be comma-separated
  end

  -- DigitalOcean doesn't prevent creation of nodes with the same name.
  -- Add the current epoch to help make it unique.
  local name = string.gsub(dom_obj.domain, '%.', '-') .. '.' .. os.time()
  local args = {
    'doctl', 'compute', 'droplet', 'create', name,
    '--image', image_id, '--region', region,
    '--ssh-keys', opts.key_ids, '--size', size, '--wait',
    '--format', 'ID,Name,Public IPv4', '--no-header',
  }
  if tags then
    table.insert(args, '--tag-names')
    table.insert(args, tags)
  end
  log('> create node %s based on image id %s...', name, image_id)
  -- --wait makes doctl print the droplet row once it is up; parse its
  -- id and public IPv4 from the first three columns.
  local out = assert(sh.cmd(table.unpack(args)):output())
  local id, _, ip4 = string.match(out, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)')
  log(' ok\n')
  return {id = id, name = name, ip4 = ip4}
end

-- Finds the droplet whose public IPv4 matches the domain's A record.
-- Returns {id, name, ip4}, or nil when no droplet has that address.
-- Raises when the domain has no A record at all.
local function get_node(dom_obj)
  local a_record = dom_obj.A
  if not a_record then
    error('domain is not associated to any node')
  end

  log('> get node associated with IP address %s...', a_record.ip)
  local listing = assert(sh.cmd('doctl', 'compute', 'droplet', 'list',
    '--format', 'ID,Name,Public IPv4', '--no-header'):output())

  for id, name, ip4 in string.gmatch(listing, '%f[^%s\0](%S+)%s+(%S+)%s+(%S+)') do
    if ip4 == a_record.ip then
      log(' ok\n')
      return {id = id, name = name, ip4 = ip4}
    end
  end
end

-- Deploys the code at the given git tag to the node by piping the
-- generated install script into ssh. When tag is nil, the most recent
-- tag reachable from HEAD is used.
local function deploy_code(tag, node)
  if not tag then
    -- get latest tag
    tag = assert(sh.cmd('git', 'describe', '--tags', '--abbrev=0'):output())
  end
  -- strip any trailing whitespace/newline from the command output so
  -- the tag interpolates cleanly into the script's URLs and file names.
  tag = string.gsub(tag, '%s+$', '')
  log('> deploy code at tag %s to %s...', tag, node.name)
  assert((
    sh.cmd('echo', codesh(tag)) |
    sh.cmd('ssh', '-o', 'StrictHostKeyChecking no', 'root@' .. node.ip4)
  ):output())
  log(' ok\n')
end

-- Restarts the database and application services on the node by piping
-- the restart script into ssh.
local function restart_services(node)
  log('> restart services on %s...', node.name)
  local pipeline = sh.cmd('echo', svcsh) |
    sh.cmd('ssh', '-o', 'StrictHostKeyChecking no', 'root@' .. node.ip4)
  assert(pipeline:output())
  log(' ok\n')
end

-- Option names that are only meaningful together with --create; the
-- entry point rejects them when --create is absent.
local REQUIRES_CREATE = {
  'firewall',
  'project',
  'ssh_keys',
  'tags',
}

-- Entry point of the deploy command. `domain` is the target sub-domain
-- (e.g. www.example.com) and `opts` are the parsed CLI flags. Runs the
-- deployment steps in order: optional node creation (with project,
-- firewall), optional DB restore (TODO), code deploy, service restart,
-- and DNS activation of a newly-created node.
return function(domain, opts)
  -- check that provided flags are accepted in the current context
  if not opts.create then
    for _, arg in ipairs(REQUIRES_CREATE) do
      if opts[arg] then
        error(string.format('option %s requires --create', arg))
      end
    end
  end

  -- get the domain object, which must already exist on the provider
  local dom_obj = get_domain(domain)
  assert(dom_obj, 'domain does not exist')

  -- step 1: create new node if requested
  local node
  if opts.create then
    node = create_node(dom_obj, opts)
    if opts.project then
      set_project(opts.project, node)
    end
    if opts.firewall then
      set_firewall(opts.firewall, node)
    end
    -- wait a bit, might not be able to ssh into it otherwise
    unistd.sleep(5)
  else
    -- no creation: the node is found via the domain's A record; get_node
    -- raises when the domain has no A record.
    node = get_node(dom_obj)
    assert(node, string.format('no node exists for IP address %s', dom_obj.A.ip))
  end

  -- step 2: restore from a database backup if requested
  if opts.with_db then
    -- TODO: step 2: install database from backup
  end

  -- step 3: deploy the code if requested
  if not opts.without_code then
    deploy_code(opts.with_code, node)
  end

  -- step 4: restart DB and app services
  restart_services(node)

  -- step 5: activate the new deployment, if a new node was created
  if opts.create then
    -- activate the new node for that sub-domain
    set_domain(dom_obj, node)
  end

  log([[
> done.

  You may inspect the node by running:
    $ doctl compute ssh %s
]], node.id)
end

A  => exp/cmds/deploy/svc_script.lua +18 -0
@@ 1,18 @@
-- Shebang and strict-mode prelude for the service-restart script.
local function bash()
  local prelude = [[#!/usr/bin/env bash

set -euo pipefail
]]
  return prelude
end

-- Shell commands that restart the database first, then the app.
local function restart()
  local section = [[
systemctl restart postgresql
systemctl restart app
]]
  return section
end

-- The service-restart script: bash prelude followed by the restarts.
return table.concat({
  bash(),
  restart(),
})

A  => exp/deploy.lua +93 -0
@@ 1,93 @@
#!/usr/bin/env -S llrocks run

local fn = require 'fn'
local OptionParser = require 'optparse'

-- Usage/help text. optparse derives the accepted options and the
-- version string (the first "v0.0.0" line) from this text, so its
-- wording is part of the program's behavior - edit with care.
local help = [[
v0.0.0
Usage: deploy.lua [<options>] DOMAIN

The deploy script is the combination of:
1. create an infrastructure (optional)
2. install the database on that infrastructure (optional)
3. deploy the new code to that infrastructure (optional)
4. (re)start the application's services
5. activate this deployment (optional)

The usage of the command looks like this:

$ deploy www.example.com

By default, this is sufficient to run the most common case, which
is to deploy the latest tagged version to the existing infrastructure
associated with that sub-domain (as identified by looking up the
node with the IP address linked to the sub-domain). The existing
database is left untouched, and only the new code is deployed and
the application is restarted (so only steps 3 and 4 are executed).

More complex scenarios follow:

$ deploy --create 'region:size[:image]' --ssh-keys 'list,of,names' --tags 'list,of,tags' www.example.com

    Create a new infrastructure with the specified region, size, and optional image,
    and enable the ssh keys identified by name and apply the set of tags.
    If no image is provided, a new image is created and used. Steps 1, 2 (an empty database),
    3, 4 and 5 are executed. Step 5 simply involves mapping the sub-domain to the IP address
    of the new node.

$ deploy --create ... --with-db 'db-backup-id' www.example.com

    Create a new infrastructure, but restores the database from the specified backup.

$ deploy --with-db 'db-backup-id' --with-code 'git-tag' www.example.com

    Restore the database from the specified backup in the existing infrastructure (so, execute
    steps 2, 3 and 4 only).

$ deploy --with-db 'db-backup-id' --without-code www.example.com

    Restore the database from the specified backup in the existing infrastructure and do not
    deploy any code (so, execute steps 2 and 4 only).

$ deploy --without-code www.example.com

    Only restarts the services on the existing infrastructure, no code nor database is
    deployed (so, execute step 4 only).

Options:

  --create=R:S:I        Create a new node using region R, size S and the optional
                        image I (creates a new image if not provided).
  --firewall=NAME       Assign this firewall to the new node. Requires --create.
  -h, --help            Display this help and exit.
  --project=NAME        Assign the new node to this project. Requires --create.
  --ssh-keys=k1,k2,...  Associate the ssh keys identified by the comma-separated list of key
                        names with the new node. Requires --create.
  --tags=t1,t2,...      Associate the comma-separated list of tags with the new node.
                        Requires --create.
  -V, --version         Display the version and exit.
  --with-code=TAG       Installs the code at the git version identified by TAG. Defaults to
                        the latest tag.
  --with-db=DB          Restores or installs the specified database backup.
  --without-code        Does not deploy code.
]]

-- Parse the command line against the help-derived option spec.
local parser = OptionParser(help)
local arg, opts = parser:parse(_G.arg)

-- Any remaining flag-like positional is an option the parser did not
-- recognize; after that, exactly one positional (the domain) must remain.
local found, ix = fn.any(function(_, v)
  return string.match(v, '^%-%-?[^%-]+')
end, ipairs(arg))
if found then
  parser:opterr(string.format('unrecognized flag: %s', arg[ix]))
  return
end
if #arg > 1 then
  parser:opterr(string.format('unexpected arguments starting with: %s', arg[1]))
  return
end
if #arg == 0 then
  parser:opterr('the domain argument is required')
  return
end

-- Hand off to the deploy command implementation.
local cmd = require 'scripts.cmds.deploy'
cmd(arg[1], opts)

A  => src/cmds/init.lua +3 -0
@@ 1,3 @@
-- Registry of available CLI commands, keyed by command name.
local commands = {}
commands.init = require 'src.cmds.initcmd'
return commands

A  => src/cmds/initcmd.lua +7 -0
@@ 1,7 @@
local inspect = require 'inspect'

-- Placeholder implementation of the `init` command: dumps the parsed
-- arguments and options to stdout and reports success.
return function(args, opts)
  io.write(inspect(args), '\n')
  io.write(inspect(opts), '\n')
  return true
end

A  => src/main.lua +36 -0
@@ 1,36 @@
local OptionParser = require 'optparse'
-- CLI version, interpolated into the help text below.
local VERSION = "0.0.1"

-- Usage/help text; optparse derives the accepted options and the
-- version string from it.
local help = string.format([[
tulip-cli v%s
Usage: tulip-cli CMD [<options>]

The following tulip commands are supported:

  init                  Initialize environment for development of a
                        tulip project.

Options:

  -h, --help            Display this help and exit.
  -V, --version         Display the version and exit.
]], VERSION)

-- command name -> handler function registry
local cmds = require 'src.cmds'
local parser = OptionParser(help)

-- Entry point: parses args, validates the command name, and dispatches
-- to its handler. Returns the handler's result; on a usage error,
-- reports it via the parser and returns true.
return function(args)
  local positionals, opts = parser:parse(args)
  local cmd_name = positionals[1]

  if cmd_name == nil then
    parser:opterr('the command is required')
    return true
  end

  local handler = cmds[cmd_name]
  if handler == nil then
    parser:opterr(string.format('unknown command %q', cmd_name))
    return true
  end

  return handler(positionals, opts)
end

A  => tulip-cli.lua +4 -0
@@ 1,4 @@
#!/usr/bin/env lua

-- CLI launcher: delegates to src.main with the process arguments and
-- raises when the command reports failure.
local main = require 'src.main'
assert(main(_G.arg))