ctucx.git: ansible-configs

My personal ansible roles and playbooks [deprecated in favor of nixos]

commit 33495137adcf1b7ae4761c02a1a45436478beaa6
parent fa669646f963192cd77e15cb8705ff085c45c0a3
Author: Leah (ctucx) <leah@ctu.cx>
Date: Tue, 12 Jan 2021 19:35:18 +0100

refactor almost everything
292 files changed, 13141 insertions(+), 5915 deletions(-)
M
.DS_Store
|
0
A
.ansible-cache/10.0.0.1
|
497
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
.ansible-cache/taurus.ctu.cx
|
960
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
.ansible-cache/wanderduene.ctu.cx
|
949
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
.gitignore
|
7
+++++++
A
_playbook-router-alpine.yml
|
110
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
D
alpine/.DS_Store
|
0
D
alpine/config-files/acme-redirect/acme-redirect.conf
|
3
---
D
alpine/config-files/awall/custom-services.json
|
10
----------
D
alpine/config-files/awall/dns.json
|
13
-------------
D
alpine/config-files/awall/frps.json
|
13
-------------
D
alpine/config-files/awall/mail.json
|
37
-------------------------------------
D
alpine/config-files/awall/web.json
|
19
-------------------
D
alpine/config-files/cgit/cgitrc
|
59
-----------------------------------------------------------
D
alpine/config-files/ferm/ferm-lollo.conf
|
96
-------------------------------------------------------------------------------
D
alpine/config-files/grafana/grafana.ini
|
818
-------------------------------------------------------------------------------
D
alpine/config-files/grafana/provisioning/dashboards/node-stats.json
|
1435
-------------------------------------------------------------------------------
D
alpine/config-files/nginx/nginx.conf
|
52
----------------------------------------------------
D
alpine/config-files/nginx/proxy.conf
|
16
----------------
D
alpine/config-files/nginx/ssl.conf
|
21
---------------------
D
alpine/config-files/prometheus/prometheus.yml
|
42
------------------------------------------
D
alpine/config-files/rest-server/rest-server.initd
|
15
---------------
D
alpine/config-files/sudo/sudoers.patch
|
11
-----------
D
alpine/config-files/website-vhosts/ctu.cx.conf
|
43
-------------------------------------------
D
alpine/config-files/website-vhosts/photos.ctu.cx.conf
|
20
--------------------
D
alpine/config-files/website-vhosts/repo.f2k1.de.conf
|
15
---------------
D
alpine/inventory
|
12
------------
D
alpine/playbook-router.yml
|
78
------------------------------------------------------------------------------
D
alpine/playbook-servers.yml
|
345
-------------------------------------------------------------------------------
D
alpine/roles/acme-redirect/tasks/main.yml
|
77
-----------------------------------------------------------------------------
D
alpine/roles/acme-redirect/templates/acme-redirect.conf.j2
|
12
------------
D
alpine/roles/backup/tasks/main.yml
|
18
------------------
D
alpine/roles/backup/tasks/wanderduene.yml
|
41
-----------------------------------------
D
alpine/roles/bind/tasks/main.yml
|
45
---------------------------------------------
D
alpine/roles/bind/templates/named.conf.j2
|
14
--------------
D
alpine/roles/cgit/tasks/main.yml
|
55
-------------------------------------------------------
D
alpine/roles/cgit/templates/cgit-vhost.conf.j2
|
21
---------------------
D
alpine/roles/common/tasks/chrony.yml
|
15
---------------
D
alpine/roles/common/tasks/firewall-awall.yml
|
43
-------------------------------------------
D
alpine/roles/common/tasks/firewall-ferm.yml
|
23
-----------------------
D
alpine/roles/common/tasks/main.yml
|
21
---------------------
D
alpine/roles/common/tasks/network.yml
|
80
-------------------------------------------------------------------------------
D
alpine/roles/common/tasks/node-exporter.yml
|
10
----------
D
alpine/roles/common/tasks/packages.yml
|
32
--------------------------------
D
alpine/roles/common/tasks/sshd.yml
|
18
------------------
D
alpine/roles/common/tasks/sudo.yml
|
12
------------
D
alpine/roles/common/tasks/users.yml
|
34
----------------------------------
D
alpine/roles/common/templates/awall-baseconfig.yaml.j2
|
18
------------------
D
alpine/roles/common/templates/hosts.conf.j2
|
2
--
D
alpine/roles/common/templates/interfaces.conf.j2
|
59
-----------------------------------------------------------
D
alpine/roles/common/templates/repositories.j2
|
7
-------
D
alpine/roles/common/templates/resolv.conf.j2
|
4
----
D
alpine/roles/dnsmasq/tasks/main.yml
|
18
------------------
D
alpine/roles/dnsmasq/templates/dnsmasq.conf.j2
|
34
----------------------------------
D
alpine/roles/frp/tasks/frpc.yml
|
20
--------------------
D
alpine/roles/frp/tasks/frps.yml
|
37
-------------------------------------
D
alpine/roles/frp/tasks/main.yml
|
14
--------------
D
alpine/roles/frp/templates/frpc.conf.j2
|
34
----------------------------------
D
alpine/roles/frp/templates/frps-vhost.conf.j2
|
32
--------------------------------
D
alpine/roles/frp/templates/frps.confd.j2
|
2
--
D
alpine/roles/gitolite/tasks/main.yml
|
59
-----------------------------------------------------------
D
alpine/roles/grafana/tasks/main.yml
|
40
----------------------------------------
D
alpine/roles/grafana/templates/grafana-vhost.conf.j2
|
15
---------------
D
alpine/roles/hostapd/tasks/main.yml
|
18
------------------
D
alpine/roles/hostapd/templates/hostapd.conf.j2
|
23
-----------------------
D
alpine/roles/maddy/tasks/main.yml
|
52
----------------------------------------------------
D
alpine/roles/maddy/templates/maddy.conf.j2
|
184
-------------------------------------------------------------------------------
D
alpine/roles/nginx/tasks/main.yml
|
76
----------------------------------------------------------------------------
D
alpine/roles/nginx/templates/vhost.conf.j2
|
22
----------------------
D
alpine/roles/oeffi-web/tasks/main.yml
|
64
----------------------------------------------------------------
D
alpine/roles/oeffi-web/templates/oeffi-web-vhost.conf.j2
|
26
--------------------------
D
alpine/roles/oeffi-web/templates/oeffi-web.initd.j2
|
29
-----------------------------
D
alpine/roles/oeffisearch/tasks/main.yml
|
64
----------------------------------------------------------------
D
alpine/roles/oeffisearch/templates/oeffisearch-vhost.conf.j2
|
27
---------------------------
D
alpine/roles/oeffisearch/templates/oeffisearch.initd.j2
|
29
-----------------------------
D
alpine/roles/pleroma/tasks/main.yml
|
54
------------------------------------------------------
D
alpine/roles/pleroma/templates/pleroma-vhost.conf.j2
|
40
----------------------------------------
D
alpine/roles/prometheus/tasks/main.yml
|
42
------------------------------------------
D
alpine/roles/prometheus/templates/prometheus-vhost.conf.j2
|
19
-------------------
D
alpine/roles/radicale/tasks/main.yml
|
42
------------------------------------------
D
alpine/roles/radicale/templates/radicale-vhost.conf.j2
|
18
------------------
D
alpine/roles/rest-server/tasks/main.yml
|
46
----------------------------------------------
D
alpine/roles/rest-server/templates/rest-server-vhost.conf.j2
|
19
-------------------
D
alpine/roles/synapse/tasks/main.yml
|
51
---------------------------------------------------
D
alpine/roles/synapse/templates/synapse-vhost.conf.j2
|
20
--------------------
D
alpine/roles/syncthing/tasks/main.yml
|
46
----------------------------------------------
D
alpine/roles/syncthing/templates/syncthing-initd.j2
|
24
------------------------
D
alpine/roles/syncthing/templates/syncthing-vhost.conf.j2
|
16
----------------
D
alpine/roles/websites/tasks/ctu.cx.yml
|
21
---------------------
D
alpine/roles/websites/tasks/main.yml
|
15
---------------
D
alpine/roles/websites/tasks/photos.ctu.cx.yml
|
21
---------------------
D
alpine/roles/websites/tasks/repo.f2k1.de.yml
|
14
--------------
D
alpine/scripts/restic-backup-wanderduene.sh
|
20
--------------------
A
ansible.cfg
|
12
++++++++++++
D
arch/config-files/common/pacman.conf.patch
|
13
-------------
D
arch/config-files/ferm/ferm-lollo.conf
|
105
-------------------------------------------------------------------------------
D
arch/inventory
|
6
------
D
arch/playbook.yml
|
183
-------------------------------------------------------------------------------
D
arch/roles/common/tasks/firewall.yml
|
20
--------------------
D
arch/roles/common/tasks/ip-forwarding.yml
|
38
--------------------------------------
D
arch/roles/common/tasks/main.yml
|
19
-------------------
D
arch/roles/common/tasks/network.yml
|
19
-------------------
D
arch/roles/common/tasks/node-exporter.yml
|
13
-------------
D
arch/roles/common/tasks/packages.yml
|
34
----------------------------------
D
arch/roles/common/tasks/sshd.yml
|
13
-------------
D
arch/roles/common/tasks/sudo.yml
|
8
--------
D
arch/roles/common/tasks/users.yml
|
34
----------------------------------
D
arch/roles/dnsmasq/tasks/main.yml
|
18
------------------
D
arch/roles/dnsmasq/templates/dnsmasq.conf.j2
|
70
----------------------------------------------------------------------
D
arch/roles/hostapd/tasks/main.yml
|
18
------------------
D
arch/roles/hostapd/templates/hostapd.conf.j2
|
23
-----------------------
R
alpine/config-files/cgit/cgit.css -> config-files/cgit/cgit.css
|
0
A
config-files/cgit/cgitrc
|
59
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
config-files/ferm/lollo.conf
|
106
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
config-files/grafana/grafana.ini
|
547
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
R
alpine/config-files/grafana/provisioning/dashboards/FritzBox.json -> config-files/grafana/provisioning/dashboards/FritzBox.json
|
0
R
alpine/config-files/grafana/provisioning/dashboards/dashboards.yml -> config-files/grafana/provisioning/dashboards/dashboards.yml
|
0
R
alpine/config-files/grafana/provisioning/dashboards/node-exporter.json -> config-files/grafana/provisioning/dashboards/home.json
|
0
R
alpine/config-files/grafana/provisioning/dashboards/node-exporter.json -> config-files/grafana/provisioning/dashboards/node-exporter.json
|
0
A
config-files/grafana/provisioning/dashboards/parkplaetze-kiel.jsom
|
171
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
R
alpine/config-files/grafana/provisioning/datasources/datasources.yml -> config-files/grafana/provisioning/datasources/datasources.yml
|
0
R
alpine/config-files/pleroma/config.exs -> config-files/pleroma/config.exs
|
0
R
alpine/config-files/radicale/config -> config-files/radicale/config
|
0
R
alpine/config-files/riot-web/config.json -> config-files/riot-web/config.json
|
0
R
alpine/config-files/synapse/homeserver.yaml -> config-files/synapse/homeserver.yaml
|
0
R
alpine/config-files/synapse/log.yaml -> config-files/synapse/log.yaml
|
0
A
configuration/joguhrtbecher.yml
|
33
+++++++++++++++++++++++++++++++++
A
configuration/taurus.yml
|
157
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
configuration/wanderduene.yml
|
413
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
inventory
|
13
+++++++++++++
A
lookup_plugins/diskcache.py
|
2437
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
playbook-router.yml
|
306
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
playbook-servers.yml
|
85
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/acme-redirect/files/awall-rule.json
|
13
+++++++++++++
A
roles/acme-redirect/tasks/main.yml
|
234
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/acme-redirect/templates/acme-redirect-general.conf.j2
|
7
+++++++
A
roles/acme-redirect/templates/acme-redirect.conf.j2
|
16
++++++++++++++++
A
roles/backup/tasks/main.yml
|
12
++++++++++++
A
roles/backup/tasks/wanderduene.yml
|
54
++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/bind/files/awall-rule.json
|
13
+++++++++++++
A
roles/bind/tasks/main.yml
|
214
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/bind/templates/named.conf.j2
|
18
++++++++++++++++++
A
roles/cgit/tasks/main.yml
|
161
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/cgit/templates/nginx-vhost.conf.j2
|
43
+++++++++++++++++++++++++++++++++++++++++++
A
roles/common/files/awall/custom-services.json
|
7
+++++++
R
alpine/config-files/awall/ssh.json -> roles/common/files/awall/ssh.json
|
0
R
alpine/config-files/ferm/ferm.initd -> roles/common/files/ferm.initd
|
0
A
roles/common/files/pacman.conf.patch
|
22
++++++++++++++++++++++
R
alpine/config-files/ssh/sshd_config.patch -> roles/common/files/sshd/alpine-sshd_config.patch
|
0
A
roles/common/files/sshd/archlinux-sshd_config.patch
|
20
++++++++++++++++++++
A
roles/common/files/sudoers.patch
|
19
+++++++++++++++++++
A
roles/common/tasks/firewall-awall.yml
|
86
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/firewall-ferm.yml
|
81
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/main.yml
|
46
++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/network_alpine.yml
|
49
+++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/network_general.yml
|
29
+++++++++++++++++++++++++++++
A
roles/common/tasks/network_ip-forwarding.yml
|
42
++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/node-exporter.yml
|
86
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/ntp.yml
|
39
+++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/packages.yml
|
70
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/sshd.yml
|
141
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/sudo.yml
|
49
+++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/tasks/timezone.yml
|
29
+++++++++++++++++++++++++++++
A
roles/common/tasks/users.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/common/templates/awall-baseconfig.yaml.j2
|
28
++++++++++++++++++++++++++++
A
roles/common/templates/hosts.conf.j2
|
12
++++++++++++
A
roles/common/templates/interfaces.conf.j2
|
63
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/common/templates/repositories.j2
|
11
+++++++++++
A
roles/common/templates/resolv.conf.j2
|
10
++++++++++
A
roles/common/vars/main.yml
|
14
++++++++++++++
A
roles/dnsmasq/tasks/main.yml
|
117
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/dnsmasq/templates/dnsmasq.conf.j2
|
106
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/frp/tasks/frpc.yml
|
64
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/frp/tasks/frpc_checks.yml
|
38
++++++++++++++++++++++++++++++++++++++
A
roles/frp/tasks/frps.yml
|
75
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/frp/tasks/frps_checks.yml
|
48
++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/frp/tasks/frps_nginx.yml
|
24
++++++++++++++++++++++++
A
roles/frp/tasks/main.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/frp/templates/awall-rule.json.j2
|
14
++++++++++++++
A
roles/frp/templates/frpc.conf.j2
|
38
++++++++++++++++++++++++++++++++++++++
A
roles/frp/templates/frps.confd.j2
|
6
++++++
A
roles/frp/templates/frps.ini.j2
|
10
++++++++++
A
roles/frp/templates/nginx-vhost.conf.j2
|
37
+++++++++++++++++++++++++++++++++++++
R
alpine/config-files/gitolite/gitolite.rc.patch -> roles/gitolite/files/gitolite.rc.patch
|
0
A
roles/gitolite/tasks/main.yml
|
107
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/grafana/tasks/checks.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/grafana/tasks/configure.yml
|
23
+++++++++++++++++++++++
A
roles/grafana/tasks/install.yml
|
17
+++++++++++++++++
A
roles/grafana/tasks/main.yml
|
38
++++++++++++++++++++++++++++++++++++++
A
roles/grafana/tasks/nginx.yml
|
23
+++++++++++++++++++++++
A
roles/grafana/tasks/remove.yml
|
41
+++++++++++++++++++++++++++++++++++++++++
A
roles/grafana/tasks/start.yml
|
18
++++++++++++++++++
A
roles/grafana/templates/nginx-vhost.conf.j2
|
33
+++++++++++++++++++++++++++++++++
A
roles/hostapd/tasks/main.yml
|
119
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/hostapd/templates/hostapd.conf.j2
|
27
+++++++++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/CHANGELOG.md
|
13
+++++++++++++
A
roles/kawaidesu.ansible_networkd/LICENSE
|
21
+++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/README.md
|
74
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/defaults/main.yml
|
6
++++++
A
roles/kawaidesu.ansible_networkd/handlers/main.yml
|
17
+++++++++++++++++
A
roles/kawaidesu.ansible_networkd/meta/.galaxy_install_info
|
2
++
A
roles/kawaidesu.ansible_networkd/meta/main.yml
|
30
++++++++++++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/tasks/deploy_configs.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/tasks/main.yml
|
30
++++++++++++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/tasks/remove_unmanaged.yml
|
20
++++++++++++++++++++
A
roles/kawaidesu.ansible_networkd/templates/networkd.j2
|
12
++++++++++++
A
roles/kawaidesu.ansible_networkd/templates/resolv.conf.j2
|
4
++++
A
roles/maddy/files/awall-rule.json
|
37
+++++++++++++++++++++++++++++++++++++
R
alpine/config-files/maddy/maddy-service.patch -> roles/maddy/files/maddy-service.patch
|
0
A
roles/maddy/tasks/checks.yml
|
20
++++++++++++++++++++
A
roles/maddy/tasks/configure.yml
|
32
++++++++++++++++++++++++++++++++
A
roles/maddy/tasks/firewall.yml
|
19
+++++++++++++++++++
A
roles/maddy/tasks/install.yml
|
9
+++++++++
A
roles/maddy/tasks/main.yml
|
32
++++++++++++++++++++++++++++++++
A
roles/maddy/tasks/remove.yml
|
45
+++++++++++++++++++++++++++++++++++++++++++++
A
roles/maddy/tasks/start.yml
|
9
+++++++++
A
roles/maddy/templates/maddy.conf.j2
|
189
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/nginx/files/awall-rule.json
|
19
+++++++++++++++++++
A
roles/nginx/files/awall-rule_httpsOnly.json
|
13
+++++++++++++
A
roles/nginx/files/nginx.conf
|
59
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/nginx/files/proxy_settings.conf
|
20
++++++++++++++++++++
A
roles/nginx/files/ssl_settings.conf
|
25
+++++++++++++++++++++++++
A
roles/nginx/tasks/main.yml
|
242
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/nginx/templates/vhost.conf.j2
|
84
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/oeffi-web/tasks/main.yml
|
164
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/oeffi-web/templates/nginx-vhost.conf.j2
|
51
+++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/oeffi-web/templates/oeffi-web.initd.j2
|
36
++++++++++++++++++++++++++++++++++++
A
roles/oeffisearch/tasks/main.yml
|
163
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/oeffisearch/templates/nginx-vhost.conf.j2
|
52
++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/oeffisearch/templates/oeffisearch.initd.j2
|
35
+++++++++++++++++++++++++++++++++++
A
roles/passwordstore/tasks/main.yml
|
6
++++++
A
roles/pleroma/meta/main.yml
|
6
++++++
A
roles/pleroma/tasks/checks.yml
|
47
+++++++++++++++++++++++++++++++++++++++++++++++
A
roles/pleroma/tasks/configure.yml
|
17
+++++++++++++++++
A
roles/pleroma/tasks/install.yml
|
9
+++++++++
A
roles/pleroma/tasks/main.yml
|
37
+++++++++++++++++++++++++++++++++++++
A
roles/pleroma/tasks/nginx.yml
|
14
++++++++++++++
A
roles/pleroma/tasks/remove.yml
|
0
A
roles/pleroma/tasks/start.yml
|
9
+++++++++
A
roles/pleroma/templates/nginx-vhost.conf.j2
|
57
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/postgresql/tasks/main.yml
|
84
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/prometheus/tasks/checks.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/prometheus/tasks/configure.yml
|
6
++++++
A
roles/prometheus/tasks/install.yml
|
17
+++++++++++++++++
A
roles/prometheus/tasks/main.yml
|
38
++++++++++++++++++++++++++++++++++++++
A
roles/prometheus/tasks/nginx.yml
|
24
++++++++++++++++++++++++
A
roles/prometheus/tasks/remove.yml
|
40
++++++++++++++++++++++++++++++++++++++++
A
roles/prometheus/tasks/start.yml
|
17
+++++++++++++++++
A
roles/prometheus/templates/nginx-vhost.conf.j2
|
33
+++++++++++++++++++++++++++++++++
A
roles/radicale/tasks/checks.yml
|
45
+++++++++++++++++++++++++++++++++++++++++++++
A
roles/radicale/tasks/configure.yml
|
17
+++++++++++++++++
A
roles/radicale/tasks/install.yml
|
17
+++++++++++++++++
A
roles/radicale/tasks/main.yml
|
33
+++++++++++++++++++++++++++++++++
A
roles/radicale/tasks/nginx.yml
|
23
+++++++++++++++++++++++
A
roles/radicale/tasks/remove.yml
|
40
++++++++++++++++++++++++++++++++++++++++
A
roles/radicale/tasks/start.yml
|
17
+++++++++++++++++
A
roles/radicale/templates/nginx-vhost.conf.j2
|
39
+++++++++++++++++++++++++++++++++++++++
A
roles/rest-server/tasks/checks.yml
|
45
+++++++++++++++++++++++++++++++++++++++++++++
A
roles/rest-server/tasks/configure.yml
|
25
+++++++++++++++++++++++++
A
roles/rest-server/tasks/install.yml
|
17
+++++++++++++++++
A
roles/rest-server/tasks/main.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/rest-server/tasks/nginx.yml
|
33
+++++++++++++++++++++++++++++++++
A
roles/rest-server/tasks/remove.yml
|
44
++++++++++++++++++++++++++++++++++++++++++++
A
roles/rest-server/tasks/start.yml
|
17
+++++++++++++++++
A
roles/rest-server/templates/nginx-vhost.conf.j2
|
45
+++++++++++++++++++++++++++++++++++++++++++++
A
roles/rest-server/templates/openrc-service.j2
|
20
++++++++++++++++++++
A
roles/rest-server/templates/systemd-service.j2
|
27
+++++++++++++++++++++++++++
A
roles/synapse/meta/main.yaml
|
4
++++
A
roles/synapse/tasks/checks.yml
|
48
++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/synapse/tasks/configure.yml
|
39
+++++++++++++++++++++++++++++++++++++++
A
roles/synapse/tasks/install.yml
|
39
+++++++++++++++++++++++++++++++++++++++
A
roles/synapse/tasks/main.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/synapse/tasks/nginx.yml
|
24
++++++++++++++++++++++++
A
roles/synapse/tasks/remove.yml
|
41
+++++++++++++++++++++++++++++++++++++++++
A
roles/synapse/tasks/start.yml
|
18
++++++++++++++++++
A
roles/synapse/templates/nginx-vhost.conf.j2
|
73
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
R
alpine/config-files/awall/syncthing.json -> roles/syncthing/files/awall-rule.json
|
0
A
roles/syncthing/tasks/checks.yml
|
36
++++++++++++++++++++++++++++++++++++
A
roles/syncthing/tasks/configure.yml
|
28
++++++++++++++++++++++++++++
A
roles/syncthing/tasks/install.yml
|
17
+++++++++++++++++
A
roles/syncthing/tasks/main.yml
|
34
++++++++++++++++++++++++++++++++++
A
roles/syncthing/tasks/nginx.yml
|
20
++++++++++++++++++++
A
roles/syncthing/tasks/remove.yml
|
84
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A
roles/syncthing/tasks/start.yml
|
45
+++++++++++++++++++++++++++++++++++++++++++++
A
roles/syncthing/templates/nginx-vhost.conf.j2
|
35
+++++++++++++++++++++++++++++++++++
A
roles/syncthing/templates/openrc-service.j2
|
28
++++++++++++++++++++++++++++
A
roles/websites/tasks/ctu.cx.yml
|
30
++++++++++++++++++++++++++++++
A
roles/websites/tasks/main.yml
|
15
+++++++++++++++
A
roles/websites/tasks/photos.ctu.cx.yml
|
30
++++++++++++++++++++++++++++++
A
roles/websites/tasks/repo.f2k1.de.yml
|
23
+++++++++++++++++++++++
R
alpine/roles/wireguard/tasks/main.yml -> roles/wireguard/tasks/main.yml
|
0
A
scripts/restic-backup-wanderduene.sh
|
20
++++++++++++++++++++
diff --git a/.DS_Store b/.DS_Store  Binary files differ.
diff --git a/.ansible-cache/10.0.0.1 b/.ansible-cache/10.0.0.1
@@ -0,0 +1,496 @@
+{
+    "_ansible_facts_gathered": true,
+    "ansible_all_ipv4_addresses": [
+        "195.39.246.32",
+        "192.168.178.116",
+        "195.39.246.32",
+        "10.0.0.1"
+    ],
+    "ansible_all_ipv6_addresses": [
+        "fe80::5e87:9cff:fe3d:b8e6",
+        "2a0f:4ac0:acab::1",
+        "fe80::56b2:3ff:fe91:cdb",
+        "2a0f:4ac0:acab:0:409a:91ff:fe4e:f431",
+        "2a0f:4ac0:acab::1",
+        "fe80::409a:91ff:fe4e:f431"
+    ],
+    "ansible_apparmor": {
+        "status": "disabled"
+    },
+    "ansible_architecture": "x86_64",
+    "ansible_bios_date": "09/23/2019",
+    "ansible_bios_vendor": "Intel Corp.",
+    "ansible_bios_version": "CHAPLCEL.0038.2019.0923.1810",
+    "ansible_board_asset_tag": "NA",
+    "ansible_board_name": "NUC8CCHB",
+    "ansible_board_serial": "BTCH944005HG",
+    "ansible_board_vendor": "Intel Corporation",
+    "ansible_board_version": "K44767-502",
+    "ansible_brlan": {
+        "active": true,
+        "device": "brlan",
+        "id": "8000.429a914ef431",
+        "interfaces": [
+            "wlp3s0",
+            "enp2s0.5"
+        ],
+        "ipv4": {
+            "address": "195.39.246.32",
+            "broadcast": "195.39.246.47",
+            "netmask": "255.255.255.240",
+            "network": "195.39.246.32"
+        },
+        "ipv4_secondaries": [
+            {
+                "address": "10.0.0.1",
+                "broadcast": "10.0.0.255",
+                "netmask": "255.255.255.0",
+                "network": "10.0.0.0"
+            }
+        ],
+        "ipv6": [
+            {
+                "address": "2a0f:4ac0:acab:0:409a:91ff:fe4e:f431",
+                "prefix": "64",
+                "scope": "global"
+            },
+            {
+                "address": "2a0f:4ac0:acab::1",
+                "prefix": "48",
+                "scope": "global"
+            },
+            {
+                "address": "fe80::409a:91ff:fe4e:f431",
+                "prefix": "64",
+                "scope": "link"
+            }
+        ],
+        "macaddress": "42:9a:91:4e:f4:31",
+        "mtu": 1500,
+        "promisc": false,
+        "speed": 1000,
+        "stp": false,
+        "type": "bridge"
+    },
+    "ansible_chassis_asset_tag": "NA",
+    "ansible_chassis_serial": "NA",
+    "ansible_chassis_vendor": "Intel Corporation",
+    "ansible_chassis_version": "2.0",
+    "ansible_cmdline": {
+        "add_efi_memmap": true,
+        "initrd": "\\initramfs-linux.img",
+        "root": "PARTUUID=2a168141-078b-4c70-87a0-c132d7ae7b38",
+        "rootfstype": "ext4"
+    },
+    "ansible_date_time": {
+        "date": "2020-12-12",
+        "day": "12",
+        "epoch": "1607778578",
+        "hour": "14",
+        "iso8601": "2020-12-12T13:09:38Z",
+        "iso8601_basic": "20201212T140938071610",
+        "iso8601_basic_short": "20201212T140938",
+        "iso8601_micro": "2020-12-12T13:09:38.071610Z",
+        "minute": "09",
+        "month": "12",
+        "second": "38",
+        "time": "14:09:38",
+        "tz": "CET",
+        "tz_offset": "+0100",
+        "weekday": "Saturday",
+        "weekday_number": "6",
+        "weeknumber": "49",
+        "year": "2020"
+    },
+    "ansible_default_ipv4": {
+        "address": "192.168.178.116",
+        "alias": "enp2s0",
+        "broadcast": "192.168.178.255",
+        "gateway": "192.168.178.1",
+        "interface": "enp2s0",
+        "macaddress": "54:b2:03:91:0c:db",
+        "mtu": 1500,
+        "netmask": "255.255.255.0",
+        "network": "192.168.178.0",
+        "type": "ether"
+    },
+    "ansible_default_ipv6": {},
+    "ansible_device_links": {
+        "ids": {
+            "nvme0n1": [
+                "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B",
+                "nvme-eui.0000000001000000e4d25c5703e25001"
+            ],
+            "nvme0n1p1": [
+                "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B-part1",
+                "nvme-eui.0000000001000000e4d25c5703e25001-part1"
+            ],
+            "nvme0n1p2": [
+                "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B-part2",
+                "nvme-eui.0000000001000000e4d25c5703e25001-part2"
+            ]
+        },
+        "labels": {},
+        "masters": {},
+        "uuids": {
+            "nvme0n1p1": [
+                "1344-D403"
+            ],
+            "nvme0n1p2": [
+                "53f739d1-5668-422e-81b5-34c1f60ecba8"
+            ]
+        }
+    },
+    "ansible_devices": {
+        "nvme0n1": {
+            "holders": [],
+            "host": "Non-Volatile memory controller: Intel Corporation SSD 660P Series (rev 03)",
+            "links": {
+                "ids": [
+                    "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B",
+                    "nvme-eui.0000000001000000e4d25c5703e25001"
+                ],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": "INTEL SSDPEKNW010T8",
+            "partitions": {
+                "nvme0n1p1": {
+                    "holders": [],
+                    "links": {
+                        "ids": [
+                            "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B-part1",
+                            "nvme-eui.0000000001000000e4d25c5703e25001-part1"
+                        ],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": [
+                            "1344-D403"
+                        ]
+                    },
+                    "sectors": "409600",
+                    "sectorsize": 512,
+                    "size": "200.00 MB",
+                    "start": "2048",
+                    "uuid": "1344-D403"
+                },
+                "nvme0n1p2": {
+                    "holders": [],
+                    "links": {
+                        "ids": [
+                            "nvme-INTEL_SSDPEKNW010T8_PHNH9210020X1P0B-part2",
+                            "nvme-eui.0000000001000000e4d25c5703e25001-part2"
+                        ],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": [
+                            "53f739d1-5668-422e-81b5-34c1f60ecba8"
+                        ]
+                    },
+                    "sectors": "1999997583",
+                    "sectorsize": 512,
+                    "size": "953.67 GB",
+                    "start": "411648",
+                    "uuid": "53f739d1-5668-422e-81b5-34c1f60ecba8"
+                }
+            },
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "none",
+            "sectors": "2000409264",
+            "sectorsize": "512",
+            "size": "953.87 GB",
+            "support_discard": "512",
+            "vendor": null,
+            "virtual": 1
+        }
+    },
+    "ansible_distribution": "Archlinux",
+    "ansible_distribution_file_path": "/etc/arch-release",
+    "ansible_distribution_file_variety": "Archlinux",
+    "ansible_distribution_major_version": "NA",
+    "ansible_distribution_release": "NA",
+    "ansible_distribution_version": "NA",
+    "ansible_dns": {
+        "nameservers": [
+            "1.1.1.1",
+            "8.8.8.8"
+        ],
+        "search": [
+            "ctu.cx"
+        ]
+    },
+    "ansible_domain": "localdomain",
+    "ansible_effective_group_id": 0,
+    "ansible_effective_user_id": 0,
+    "ansible_enp2s0": {
+        "active": true,
+        "device": "enp2s0",
+        "ipv4": {
+            "address": "192.168.178.116",
+            "broadcast": "192.168.178.255",
+            "netmask": "255.255.255.0",
+            "network": "192.168.178.0"
+        },
+        "ipv6": [
+            {
+                "address": "fe80::56b2:3ff:fe91:cdb",
+                "prefix": "64",
+                "scope": "link"
+            }
+        ],
+        "macaddress": "54:b2:03:91:0c:db",
+        "module": "igb",
+        "mtu": 1500,
+        "pciid": "0000:02:00.0",
+        "promisc": true,
+        "speed": 1000,
+        "type": "ether"
+    },
+    "ansible_enp2s0.5": {
+        "active": true,
+        "device": "enp2s0.5",
+        "macaddress": "54:b2:03:91:0c:db",
+        "mtu": 1500,
+        "promisc": true,
+        "speed": 1000,
+        "type": "ether"
+    },
+    "ansible_env": {
+        "DBUS_SESSION_BUS_ADDRESS": "unix:path=/run/user/0/bus",
+        "HOME": "/root",
+        "LOGNAME": "root",
+        "MAIL": "/var/spool/mail/root",
+        "MOTD_SHOWN": "pam",
+        "PATH": "/usr/local/sbin:/usr/local/bin:/usr/bin",
+        "PWD": "/root",
+        "SHELL": "/bin/bash",
+        "SHLVL": "1",
+        "SSH_CLIENT": "195.39.246.33 56723 22",
+        "SSH_CONNECTION": "195.39.246.33 56723 10.0.0.1 22",
+        "USER": "root",
+        "XDG_RUNTIME_DIR": "/run/user/0",
+        "XDG_SESSION_CLASS": "user",
+        "XDG_SESSION_ID": "5",
+        "XDG_SESSION_TYPE": "tty",
+        "_": "/usr/bin/python"
+    },
+    "ansible_fibre_channel_wwn": [],
+    "ansible_fips": false,
+    "ansible_form_factor": "Mini PC",
+    "ansible_fqdn": "localhost.localdomain",
+    "ansible_hostname": "lollo",
+    "ansible_hostnqn": "",
+    "ansible_interfaces": [
+        "lo",
+        "brlan",
+        "enp2s0",
+        "wlp3s0",
+        "wg-pbb",
+        "enp2s0.5"
+    ],
+    "ansible_is_chroot": false,
+    "ansible_iscsi_iqn": "",
+    "ansible_kernel": "5.9.13-arch1-1",
+    "ansible_kernel_version": "#1 SMP PREEMPT Tue, 08 Dec 2020 12:09:55 +0000",
+    "ansible_lo": {
+        "active": true,
+        "device": "lo",
+        "ipv4": {
+            "address": "127.0.0.1",
+            "broadcast": "",
+            "netmask": "255.0.0.0",
+            "network": "127.0.0.0"
+        },
+        "ipv6": [
+            {
+                "address": "::1",
+                "prefix": "128",
+                "scope": "host"
+            }
+        ],
+        "mtu": 65536,
+        "promisc": false,
+        "type": "loopback"
+    },
+    "ansible_local": {},
+    "ansible_lsb": {},
+    "ansible_machine": "x86_64",
+    "ansible_machine_id": "ce683ab7433a4053ab099133455a6878",
+    "ansible_memfree_mb": 3294,
+    "ansible_memory_mb": {
+        "nocache": {
+            "free": 3502,
+            "used": 246
+        },
+        "real": {
+            "free": 3294,
+            "total": 3748,
+            "used": 454
+        },
+        "swap": {
+            "cached": 0,
+            "free": 0,
+            "total": 0,
+            "used": 0
+        }
+    },
+    "ansible_memtotal_mb": 3748,
+    "ansible_mounts": [
+        {
+            "block_available": 232577931,
+            "block_size": 4096,
+            "block_total": 245813434,
+            "block_used": 13235503,
+            "device": "/dev/nvme0n1p2",
+            "fstype": "ext4",
+            "inode_available": 62454061,
+            "inode_total": 62504960,
+            "inode_used": 50899,
+            "mount": "/",
+            "options": "rw,relatime",
+            "size_available": 952639205376,
+            "size_total": 1006851825664,
+            "uuid": "53f739d1-5668-422e-81b5-34c1f60ecba8"
+        },
+        {
+            "block_available": 39888,
+            "block_size": 4096,
+            "block_total": 51145,
+            "block_used": 11257,
+            "device": "/dev/nvme0n1p1",
+            "fstype": "vfat",
+            "inode_available": 0,
+            "inode_total": 0,
+            "inode_used": 0,
+            "mount": "/boot",
+            "options": "rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,utf8,errors=remount-ro",
+            "size_available": 163381248,
+            "size_total": 209489920,
+            "uuid": "1344-D403"
+        }
+    ],
+    "ansible_nodename": "lollo",
+    "ansible_os_family": "Archlinux",
+    "ansible_pkg_mgr": "pacman",
+    "ansible_proc_cmdline": {
+        "add_efi_memmap": true,
+        "initrd": "\\initramfs-linux.img",
+        "root": "PARTUUID=2a168141-078b-4c70-87a0-c132d7ae7b38",
+        "rootfstype": "ext4"
+    },
+    "ansible_processor": [
+        "0",
+        "GenuineIntel",
+        "Intel(R) Celeron(R) CPU N3350 @ 1.10GHz",
+        "1",
+        "GenuineIntel",
+        "Intel(R) Celeron(R) CPU N3350 @ 1.10GHz"
+    ],
+    "ansible_processor_cores": 2,
+    "ansible_processor_count": 1,
+    "ansible_processor_nproc": 2,
+    "ansible_processor_threads_per_core": 1,
+    "ansible_processor_vcpus": 2,
+    "ansible_product_name": "NUC8CCHK",
+    "ansible_product_serial": "BTCH944005HG",
+    "ansible_product_uuid": "36dc2efc-ac6c-f534-182b-54b203910cdb",
+    "ansible_product_version": "K44798-503",
+    "ansible_python": {
+        "executable": "/usr/bin/python",
+        "has_sslcontext": true,
+        "type": "cpython",
+        "version": {
+            "major": 3,
+            "micro": 0,
+            "minor": 9,
+            "releaselevel": "final",
+            "serial": 0
+        },
+        "version_info": [
+            3,
+            9,
+            0,
+            "final",
+            0
+        ]
+    },
+    "ansible_python_version": "3.9.0",
+    "ansible_real_group_id": 0,
+    "ansible_real_user_id": 0,
+    "ansible_selinux": {
+        "status": "Missing selinux Python library"
+    },
+    "ansible_selinux_python_present": false,
+    "ansible_service_mgr": "systemd",
+    "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBALCEBsGVpSPxEJXhFW2uL/lwG3gl/bTJu8ZhylOJbnADkTfm3NWxl/xGyrxJHl4/YG+1AnNKNLoBCXklg2eD0bHjfgcc2c2+jpjiTYNHoy3ew65r1hBE32c9OgvsBrP5ERhHGcoOvl05MSWOkptOu9mdSW7D+CxlCdlhfsPXRp4jAAAAFQCp7fMSPRQLnoiexqneUo2JaK0GnwAAAIBeg4JDNezapWG3mZ9Jvau/L0MkgThIWTD0dAEeGcDrIb4sV+iLAIN+Xa+TPE/Cim3rE7wa7gW9QBzeo548ctksW7m9kNV8OXC+6TgoucWiyBsSOCNUybwZcvzs2/5LCIeBDPExhP/p1hp+33qU4mY6T/YVGKtCZKGKi+to1jSIjwAAAIB8G30VjRx1NiTU5eF5aWLFSUUTPhxgDhNY4IIcFO9psOL3Le+eqhwD7HPrD13lUByNTBZdZKAQBO52wcChD9RE0XeKmpKgxDmBIWsvZXcmHEjr7cwcSpkfXaLStXFzHJDid+EDEfJGBPr1s9gfryUcVfg2dIWkTz3jMvSW7rGRxg==",
+    "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss",
+    "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFqEmVRlfGwmURdzmg2V9T8XWgBlNucn+tbs3DWR/x+z3akfsNoQQHZXRaGJhWT1DtJ0K0TnWuBCsXM0prE6bmo=",
+    "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256",
+    "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIJx0TO8SzcUiSXQNqC1JkhLaG9U9khXgsnG6wI3oHeTY",
+    "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519",
+    "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQC/QUfIIUATbB7NPZCDwR5o+NGeUc8mvuutPaGf5aIkSVG698zWVhmd8E+GNieoa4GzopZ8Ad1X3QApPZ0LeJHSCi5ErosP9yeteOAZQQFN2FtxCmlIOfIhjd6jGlPpns4/3LvSAiNv9LaVA39UpSvNEnSCif9H7vALznSAumeFBYzmsNIujAlJe1ENwMATD1Aeot7JKzqgb7GIGrBh03NTnFSrUZf1xVGKhbVactcPMr14ZFF3saUq+SFZ5lR1zj6WFkWEO2/d3Gkz9lNpQvRJjLHiABcLZbfWASMIeAtdaIont8NfejlLRcNs0ap+H9GTXWoi4aRUB2mJ/z7irxVDhhEe1L65ExtlIpAMCUlqgzZh5+Of8ghDLtsDHNanaxK/w1aBdkEM2suVl7kalS/o5rdZHXpE1GZn3/ATXLYqiVNWtVJ+Z+5q0DPjatJgLWZjAfKnDssRsNbuggwdO3uZ/Pr9J5Wwa4iRS2c7+tySlGorTK9Y8k+Q/+jQtwMBy00=",
+    "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa",
+    "ansible_swapfree_mb": 0,
+    "ansible_swaptotal_mb": 0,
+    "ansible_system": "Linux",
+    "ansible_system_capabilities": [],
+    "ansible_system_capabilities_enforced": "False",
+    "ansible_system_vendor": "Intel(R) Client Systems",
+    "ansible_uptime_seconds": 63008,
+    "ansible_user_dir": "/root",
+    "ansible_user_gecos": "",
+    "ansible_user_gid": 0,
+    "ansible_user_id": "root",
+    "ansible_user_shell": "/bin/bash",
+    "ansible_user_uid": 0,
+    "ansible_userspace_architecture": "x86_64",
+    "ansible_userspace_bits": "64",
+    "ansible_virtualization_role": "host",
+    "ansible_virtualization_type": "kvm",
+    "ansible_wg_pbb": {
+        "active": true,
+        "device": "wg-pbb",
+        "ipv4": {
+            "address": "195.39.246.32",
+            "broadcast": "",
+            "netmask": "255.255.255.255",
+            "network": "195.39.246.32"
+        },
+        "ipv6": [
+            {
+                "address": "2a0f:4ac0:acab::1",
+                "prefix": "128",
+                "scope": "global"
+            }
+        ],
+        "mtu": 1472,
+        "promisc": false,
+        "type": "tunnel"
+    },
+    "ansible_wlp3s0": {
+        "active": true,
+        "device": "wlp3s0",
+        "ipv6": [
+            {
+                "address": "fe80::5e87:9cff:fe3d:b8e6",
+                "prefix": "64",
+                "scope": "link"
+            }
+        ],
+        "macaddress": "5c:87:9c:3d:b8:e6",
+        "module": "iwlwifi",
+        "mtu": 1500,
+        "pciid": "0000:03:00.0",
+        "promisc": true,
+        "type": "ether"
+    },
+    "discovered_interpreter_python": "/usr/bin/python",
+    "gather_subset": [
+        "all"
+    ],
+    "module_setup": true
+}+
\ No newline at end of file
diff --git a/.ansible-cache/taurus.ctu.cx b/.ansible-cache/taurus.ctu.cx
@@ -0,0 +1,959 @@
+{
+    "_ansible_facts_gathered": true,
+    "ansible_all_ipv4_addresses": [
+        "37.221.196.131"
+    ],
+    "ansible_all_ipv6_addresses": [
+        "2a03:4000:9:f8::1",
+        "fe80::80f:6cff:fe1c:10aa"
+    ],
+    "ansible_apparmor": {
+        "status": "disabled"
+    },
+    "ansible_architecture": "x86_64",
+    "ansible_bios_date": "04/01/2014",
+    "ansible_bios_vendor": "SeaBIOS",
+    "ansible_bios_version": "1.13.0-1~nc9+4",
+    "ansible_board_asset_tag": "NA",
+    "ansible_board_name": "NA",
+    "ansible_board_serial": "NA",
+    "ansible_board_vendor": "NA",
+    "ansible_board_version": "NA",
+    "ansible_chassis_asset_tag": "NA",
+    "ansible_chassis_serial": "NA",
+    "ansible_chassis_vendor": "QEMU",
+    "ansible_chassis_version": "pc-i440fx-4.2",
+    "ansible_cmdline": {
+        "BOOT_IMAGE": "vmlinuz-lts",
+        "initrd": "initramfs-lts",
+        "modules": "sd-mod,usb-storage,ext4",
+        "nomodeset": true,
+        "quiet": true,
+        "root": "UUID=269f9b0d-410d-4877-b670-e70dd551f2ae",
+        "rootfstype": "ext4"
+    },
+    "ansible_date_time": {
+        "date": "2020-12-12",
+        "day": "12",
+        "epoch": "1607775003",
+        "hour": "12",
+        "iso8601": "2020-12-12T11:10:03Z",
+        "iso8601_basic": "20201212T121003683542",
+        "iso8601_basic_short": "20201212T121003",
+        "iso8601_micro": "2020-12-12T11:10:03.683542Z",
+        "minute": "10",
+        "month": "12",
+        "second": "03",
+        "time": "12:10:03",
+        "tz": "CET",
+        "tz_offset": "+0100",
+        "weekday": "Saturday",
+        "weekday_number": "6",
+        "weeknumber": "49",
+        "year": "2020"
+    },
+    "ansible_default_ipv4": {
+        "address": "37.221.196.131",
+        "alias": "eth0",
+        "broadcast": "",
+        "gateway": "37.221.196.1",
+        "interface": "eth0",
+        "macaddress": "0a:0f:6c:1c:10:aa",
+        "mtu": 1500,
+        "netmask": "255.255.255.0",
+        "network": "37.221.196.0",
+        "type": "ether"
+    },
+    "ansible_default_ipv6": {
+        "address": "2a03:4000:9:f8::1",
+        "gateway": "fe80::1",
+        "interface": "eth0",
+        "macaddress": "0a:0f:6c:1c:10:aa",
+        "mtu": 1500,
+        "prefix": "64",
+        "scope": "global",
+        "type": "ether"
+    },
+    "ansible_device_links": {
+        "ids": {},
+        "labels": {},
+        "masters": {},
+        "uuids": {}
+    },
+    "ansible_devices": {
+        "loop0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop1": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop2": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop3": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop4": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop5": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop6": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop7": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram1": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram10": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram11": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram12": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram13": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram14": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram15": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram2": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram3": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram4": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram5": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram6": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram7": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram8": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram9": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "sr0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": "QEMU DVD-ROM",
+            "partitions": {},
+            "removable": "1",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "2097151",
+            "sectorsize": "512",
+            "size": "1024.00 MB",
+            "support_discard": "0",
+            "vendor": "QEMU",
+            "virtual": 1
+        },
+        "vda": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {
+                "vda1": {
+                    "holders": [],
+                    "links": {
+                        "ids": [],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": []
+                    },
+                    "sectors": "204800",
+                    "sectorsize": 512,
+                    "size": "100.00 MB",
+                    "start": "2048",
+                    "uuid": null
+                },
+                "vda2": {
+                    "holders": [],
+                    "links": {
+                        "ids": [],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": []
+                    },
+                    "sectors": "8388608",
+                    "sectorsize": 512,
+                    "size": "4.00 GB",
+                    "start": "206848",
+                    "uuid": null
+                },
+                "vda3": {
+                    "holders": [],
+                    "links": {
+                        "ids": [],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": []
+                    },
+                    "sectors": "662493184",
+                    "sectorsize": 512,
+                    "size": "315.90 GB",
+                    "start": "8595456",
+                    "uuid": null
+                }
+            },
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "671088640",
+            "sectorsize": "512",
+            "size": "320.00 GB",
+            "support_discard": "512",
+            "vendor": "0x1af4",
+            "virtual": 1
+        }
+    },
+    "ansible_distribution": "Alpine",
+    "ansible_distribution_file_parsed": true,
+    "ansible_distribution_file_path": "/etc/alpine-release",
+    "ansible_distribution_file_variety": "Alpine",
+    "ansible_distribution_major_version": "3",
+    "ansible_distribution_release": "NA",
+    "ansible_distribution_version": "3.13.0_alpha20200917",
+    "ansible_dns": {
+        "nameservers": [
+            "1.1.1.1",
+            "8.8.8.8"
+        ],
+        "search": [
+            "ctu.cx"
+        ]
+    },
+    "ansible_domain": "",
+    "ansible_effective_group_id": 0,
+    "ansible_effective_user_id": 0,
+    "ansible_env": {
+        "HOME": "/root",
+        "LOGNAME": "root",
+        "MAIL": "/var/mail/root",
+        "PATH": "/bin:/usr/bin:/sbin:/usr/sbin",
+        "PWD": "/root",
+        "SHELL": "/bin/ash",
+        "SHLVL": "2",
+        "SSH_CLIENT": "2a0f:4ac0:acab:0:f4c9:3010:f020:be6c 55398 22",
+        "SSH_CONNECTION": "2a0f:4ac0:acab:0:f4c9:3010:f020:be6c 55398 2a03:4000:9:f8::1 22",
+        "USER": "root"
+    },
+    "ansible_eth0": {
+        "active": true,
+        "device": "eth0",
+        "ipv4": {
+            "address": "37.221.196.131",
+            "broadcast": "",
+            "netmask": "255.255.255.0",
+            "network": "37.221.196.0"
+        },
+        "ipv6": [
+            {
+                "address": "2a03:4000:9:f8::1",
+                "prefix": "64",
+                "scope": "global"
+            },
+            {
+                "address": "fe80::80f:6cff:fe1c:10aa",
+                "prefix": "64",
+                "scope": "link"
+            }
+        ],
+        "macaddress": "0a:0f:6c:1c:10:aa",
+        "module": "virtio_net",
+        "mtu": 1500,
+        "pciid": "virtio0",
+        "promisc": false,
+        "speed": -1,
+        "type": "ether"
+    },
+    "ansible_fibre_channel_wwn": [],
+    "ansible_fips": false,
+    "ansible_form_factor": "Other",
+    "ansible_fqdn": "localhost",
+    "ansible_hostname": "taurus",
+    "ansible_hostnqn": "",
+    "ansible_interfaces": [
+        "lo",
+        "eth0"
+    ],
+    "ansible_is_chroot": false,
+    "ansible_iscsi_iqn": "",
+    "ansible_kernel": "5.4.82-0-lts",
+    "ansible_kernel_version": "#1-Alpine SMP Wed, 09 Dec 2020 11:57:26 UTC",
+    "ansible_lo": {
+        "active": true,
+        "device": "lo",
+        "ipv4": {
+            "address": "127.0.0.1",
+            "broadcast": "",
+            "netmask": "255.0.0.0",
+            "network": "127.0.0.0"
+        },
+        "ipv6": [
+            {
+                "address": "::1",
+                "prefix": "128",
+                "scope": "host"
+            }
+        ],
+        "mtu": 65536,
+        "promisc": false,
+        "type": "loopback"
+    },
+    "ansible_local": {},
+    "ansible_lsb": {},
+    "ansible_machine": "x86_64",
+    "ansible_memfree_mb": 1707,
+    "ansible_memory_mb": {
+        "nocache": {
+            "free": 2659,
+            "used": 334
+        },
+        "real": {
+            "free": 1707,
+            "total": 2993,
+            "used": 1286
+        },
+        "swap": {
+            "cached": 0,
+            "free": 4095,
+            "total": 4095,
+            "used": 0
+        }
+    },
+    "ansible_memtotal_mb": 2993,
+    "ansible_mounts": [
+        {
+            "block_available": 35761982,
+            "block_size": 4096,
+            "block_total": 81249415,
+            "block_used": 45487433,
+            "device": "/dev/vda3",
+            "fstype": "ext4",
+            "inode_available": 19572150,
+            "inode_total": 20709376,
+            "inode_used": 1137226,
+            "mount": "/",
+            "options": "rw,relatime",
+            "size_available": 146481078272,
+            "size_total": 332797603840,
+            "uuid": "N/A"
+        },
+        {
+            "block_available": 64148,
+            "block_size": 1024,
+            "block_total": 95054,
+            "block_used": 30906,
+            "device": "/dev/vda1",
+            "fstype": "ext4",
+            "inode_available": 25664,
+            "inode_total": 25688,
+            "inode_used": 24,
+            "mount": "/boot",
+            "options": "rw,relatime",
+            "size_available": 65687552,
+            "size_total": 97335296,
+            "uuid": "N/A"
+        }
+    ],
+    "ansible_nodename": "taurus",
+    "ansible_os_family": "Alpine",
+    "ansible_pkg_mgr": "apk",
+    "ansible_proc_cmdline": {
+        "BOOT_IMAGE": "vmlinuz-lts",
+        "initrd": "initramfs-lts",
+        "modules": "sd-mod,usb-storage,ext4",
+        "nomodeset": true,
+        "quiet": true,
+        "root": "UUID=269f9b0d-410d-4877-b670-e70dd551f2ae",
+        "rootfstype": "ext4"
+    },
+    "ansible_processor": [
+        "0",
+        "GenuineIntel",
+        "Intel(R) Xeon(R) CPU E5-2670 v3 @ 2.30GHz"
+    ],
+    "ansible_processor_cores": 1,
+    "ansible_processor_count": 1,
+    "ansible_processor_nproc": 1,
+    "ansible_processor_threads_per_core": 1,
+    "ansible_processor_vcpus": 1,
+    "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)",
+    "ansible_product_serial": "NA",
+    "ansible_product_uuid": "0e3acf31-0635-415f-bff1-170117dc3fea",
+    "ansible_product_version": "pc-i440fx-4.2",
+    "ansible_python": {
+        "executable": "/usr/bin/python3",
+        "has_sslcontext": true,
+        "type": "cpython",
+        "version": {
+            "major": 3,
+            "micro": 6,
+            "minor": 8,
+            "releaselevel": "final",
+            "serial": 0
+        },
+        "version_info": [
+            3,
+            8,
+            6,
+            "final",
+            0
+        ]
+    },
+    "ansible_python_version": "3.8.6",
+    "ansible_real_group_id": 0,
+    "ansible_real_user_id": 0,
+    "ansible_selinux": {
+        "status": "Missing selinux Python library"
+    },
+    "ansible_selinux_python_present": false,
+    "ansible_service_mgr": "openrc",
+    "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBAJ3WWAKX1xyaEQ3ZkhGRScttOVLX1NGtpmloweVEbdTMb50mVOwS5CwyuIrp29xms+VL9ahkTBJKJ8GaZxGv25EAoR8Q6S4xiBFN0Hro4bjh8RDton1r365tersIYZTfLn/NN1SoMnF6lNV5uMUsGf1eEQOMlzRzWbhv+k9o48xBAAAAFQDe7rq35MDDmC8FVHQGoVJSXb5FfQAAAIAA7MCRRnnHqWB7ayfBzDcDWx1wqOlTywmVRTiaJ9eJchKn317mWJSHtvXEgustYMaWaIQEXD3gXXBNCpKGxngRS1AYGV6rpNhWRFgy7zcgNlQ05fcN6xSMufZbwnJjCOFQvrW7yefc6BbqT2RS+u4u8O9yljmRXjZeVpz5qROLHgAAAIEAjtBAmW5SbFkqbgVTj00MbWX560AZTuk41TP6pa8njcEDspRJpcIpfa1wr3PHw1xFNVY781L99J1zpLod3HbaP5WxWvHc36ICHFiD+XfgrLpFpReEKUH34ce0JGmQ6x6zti18H5kxhjOQIN3AJMKWu4xON4yrq/WgrO/upN/ZdzE=",
+    "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss",
+    "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNbudXKUIVHvLtSFJLvZpcwjuVcNsu0fUE+nMRcUSIOPPATvO751+f0Hi99mNhxYVKk6l1E6VGTgqZktweUybWA=",
+    "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256",
+    "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIPLgzPRgJjRGtHE30o14eyewpg5MMRKzHAb4+07gfgTZ",
+    "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519",
+    "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQCYceZCuRlBcSZT8oiHtfX2qSrXaC9iDs3ftu436OwOaITioQhBQZhcn9ywiBNQAuaJ1BYEM1vz6XRfDxS3Cd7uaZ420S8W/KXzO+GNvynAIbo8iMiYKjv23NEaEDaEGnUcIgG0mTrLpwUZHmcqmCVDGGHXifMwrLTUFXKECk7sKHdsYt/gffo+XIHc+kD8WwBoPg9IK1DN8JRVoZ33xg8ZGD1ZatNj/oreS63Slah4iX4TS/9yYHPFCVCO03xXRgDz91UivKFryHF7gJaeJvA0YUsbSBErZ7ToCRaHfab9Z5VRd6is6QYML7j4TsJqQJdRon3zkjQtB7nLRureBKhidR8Eyudw0K+Iv2/kWYgFhM4eF2tSSyuqLKaaynB6VO3pRTpOWeuTu1H5eiJn+BSK6GrZbzC//Sy6GO9o5EY1N3W8bN7LBFjOOM8esEwN7LYJh8C+6OKZUwh46RR7cth8+JivQ4vfhnH/obvUuz75V0bAvwni9SMN1t60cOeACAc=",
+    "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa",
+    "ansible_swapfree_mb": 4095,
+    "ansible_swaptotal_mb": 4095,
+    "ansible_system": "Linux",
+    "ansible_system_capabilities": [],
+    "ansible_system_capabilities_enforced": "False",
+    "ansible_system_vendor": "QEMU",
+    "ansible_uptime_seconds": 214714,
+    "ansible_user_dir": "/root",
+    "ansible_user_gecos": "root",
+    "ansible_user_gid": 0,
+    "ansible_user_id": "root",
+    "ansible_user_shell": "/bin/ash",
+    "ansible_user_uid": 0,
+    "ansible_userspace_architecture": "x86_64",
+    "ansible_userspace_bits": "64",
+    "ansible_virtualization_role": "guest",
+    "ansible_virtualization_type": "kvm",
+    "discovered_interpreter_python": "/usr/bin/python3",
+    "gather_subset": [
+        "all"
+    ],
+    "module_setup": true
+}+
\ No newline at end of file
diff --git a/.ansible-cache/wanderduene.ctu.cx b/.ansible-cache/wanderduene.ctu.cx
@@ -0,0 +1,948 @@
+{
+    "_ansible_facts_gathered": true,
+    "ansible_all_ipv4_addresses": [
+        "46.38.253.139"
+    ],
+    "ansible_all_ipv6_addresses": [
+        "2a03:4000:1:45d::1",
+        "fe80::7436:5eff:fe67:3e05"
+    ],
+    "ansible_apparmor": {
+        "status": "disabled"
+    },
+    "ansible_architecture": "x86_64",
+    "ansible_bios_date": "04/01/2014",
+    "ansible_bios_vendor": "SeaBIOS",
+    "ansible_bios_version": "1.13.0-1~nc9+4",
+    "ansible_board_asset_tag": "NA",
+    "ansible_board_name": "NA",
+    "ansible_board_serial": "NA",
+    "ansible_board_vendor": "NA",
+    "ansible_board_version": "NA",
+    "ansible_chassis_asset_tag": "NA",
+    "ansible_chassis_serial": "NA",
+    "ansible_chassis_vendor": "QEMU",
+    "ansible_chassis_version": "pc-i440fx-4.2",
+    "ansible_cmdline": {
+        "BOOT_IMAGE": "vmlinuz-lts",
+        "initrd": "initramfs-lts",
+        "modules": "sd-mod,usb-storage,ext4",
+        "nomodeset": true,
+        "quiet": true,
+        "root": "UUID=fc06e9aa-37fc-45ab-ad89-4f04e8ed78ba",
+        "rootfstype": "ext4"
+    },
+    "ansible_date_time": {
+        "date": "2020-12-12",
+        "day": "12",
+        "epoch": "1607774999",
+        "hour": "12",
+        "iso8601": "2020-12-12T11:09:59Z",
+        "iso8601_basic": "20201212T120959896529",
+        "iso8601_basic_short": "20201212T120959",
+        "iso8601_micro": "2020-12-12T11:09:59.896529Z",
+        "minute": "09",
+        "month": "12",
+        "second": "59",
+        "time": "12:09:59",
+        "tz": "CET",
+        "tz_offset": "+0100",
+        "weekday": "Saturday",
+        "weekday_number": "6",
+        "weeknumber": "49",
+        "year": "2020"
+    },
+    "ansible_default_ipv4": {
+        "address": "46.38.253.139",
+        "alias": "eth0",
+        "broadcast": "",
+        "gateway": "46.38.253.1",
+        "interface": "eth0",
+        "macaddress": "76:36:5e:67:3e:05",
+        "mtu": 1500,
+        "netmask": "255.255.255.0",
+        "network": "46.38.253.0",
+        "type": "ether"
+    },
+    "ansible_default_ipv6": {
+        "address": "2a03:4000:1:45d::1",
+        "gateway": "fe80::1",
+        "interface": "eth0",
+        "macaddress": "76:36:5e:67:3e:05",
+        "mtu": 1500,
+        "prefix": "64",
+        "scope": "global",
+        "type": "ether"
+    },
+    "ansible_device_links": {
+        "ids": {},
+        "labels": {},
+        "masters": {},
+        "uuids": {}
+    },
+    "ansible_devices": {
+        "loop0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop1": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop2": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop3": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop4": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop5": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop6": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "loop7": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "0",
+            "sectorsize": "512",
+            "size": "0.00 Bytes",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram1": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram10": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram11": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram12": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram13": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram14": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram15": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram2": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram3": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram4": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram5": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram6": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram7": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram8": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "ram9": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": null,
+            "partitions": {},
+            "removable": "0",
+            "rotational": "0",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "",
+            "sectors": "8192",
+            "sectorsize": "512",
+            "size": "4.00 MB",
+            "support_discard": "0",
+            "vendor": null,
+            "virtual": 1
+        },
+        "sda": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": "QEMU HARDDISK",
+            "partitions": {
+                "sda1": {
+                    "holders": [],
+                    "links": {
+                        "ids": [],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": []
+                    },
+                    "sectors": "204800",
+                    "sectorsize": 512,
+                    "size": "100.00 MB",
+                    "start": "2048",
+                    "uuid": null
+                },
+                "sda3": {
+                    "holders": [],
+                    "links": {
+                        "ids": [],
+                        "labels": [],
+                        "masters": [],
+                        "uuids": []
+                    },
+                    "sectors": "22861824",
+                    "sectorsize": 512,
+                    "size": "10.90 GB",
+                    "start": "206848",
+                    "uuid": null
+                }
+            },
+            "removable": "0",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "23068672",
+            "sectorsize": "512",
+            "size": "11.00 GB",
+            "support_discard": "4096",
+            "vendor": "QEMU",
+            "virtual": 1
+        },
+        "sr0": {
+            "holders": [],
+            "host": "",
+            "links": {
+                "ids": [],
+                "labels": [],
+                "masters": [],
+                "uuids": []
+            },
+            "model": "QEMU DVD-ROM",
+            "partitions": {},
+            "removable": "1",
+            "rotational": "1",
+            "sas_address": null,
+            "sas_device_handle": null,
+            "scheduler_mode": "mq-deadline",
+            "sectors": "2097151",
+            "sectorsize": "512",
+            "size": "1024.00 MB",
+            "support_discard": "0",
+            "vendor": "QEMU",
+            "virtual": 1
+        }
+    },
+    "ansible_distribution": "Alpine",
+    "ansible_distribution_file_parsed": true,
+    "ansible_distribution_file_path": "/etc/alpine-release",
+    "ansible_distribution_file_variety": "Alpine",
+    "ansible_distribution_major_version": "3",
+    "ansible_distribution_release": "NA",
+    "ansible_distribution_version": "3.13.0_alpha20200917",
+    "ansible_dns": {
+        "nameservers": [
+            "1.1.1.1",
+            "8.8.8.8"
+        ],
+        "search": [
+            "ctu.cx"
+        ]
+    },
+    "ansible_domain": "",
+    "ansible_effective_group_id": 0,
+    "ansible_effective_user_id": 0,
+    "ansible_env": {
+        "HOME": "/root",
+        "LOGNAME": "root",
+        "MAIL": "/var/mail/root",
+        "PATH": "/bin:/usr/bin:/sbin:/usr/sbin",
+        "PWD": "/root",
+        "SHELL": "/bin/ash",
+        "SHLVL": "2",
+        "SSH_CLIENT": "2a0f:4ac0:acab:0:f4c9:3010:f020:be6c 55397 22",
+        "SSH_CONNECTION": "2a0f:4ac0:acab:0:f4c9:3010:f020:be6c 55397 2a03:4000:1:45d::1 22",
+        "USER": "root"
+    },
+    "ansible_eth0": {
+        "active": true,
+        "device": "eth0",
+        "ipv4": {
+            "address": "46.38.253.139",
+            "broadcast": "",
+            "netmask": "255.255.255.0",
+            "network": "46.38.253.0"
+        },
+        "ipv6": [
+            {
+                "address": "2a03:4000:1:45d::1",
+                "prefix": "64",
+                "scope": "global"
+            },
+            {
+                "address": "fe80::7436:5eff:fe67:3e05",
+                "prefix": "64",
+                "scope": "link"
+            }
+        ],
+        "macaddress": "76:36:5e:67:3e:05",
+        "module": "virtio_net",
+        "mtu": 1500,
+        "pciid": "virtio0",
+        "promisc": false,
+        "speed": -1,
+        "type": "ether"
+    },
+    "ansible_fibre_channel_wwn": [],
+    "ansible_fips": false,
+    "ansible_form_factor": "Other",
+    "ansible_fqdn": "localhost",
+    "ansible_hostname": "wanderduene",
+    "ansible_hostnqn": "",
+    "ansible_interfaces": [
+        "eth0",
+        "lo"
+    ],
+    "ansible_is_chroot": false,
+    "ansible_iscsi_iqn": "",
+    "ansible_kernel": "5.4.82-0-lts",
+    "ansible_kernel_version": "#1-Alpine SMP Wed, 09 Dec 2020 11:57:26 UTC",
+    "ansible_lo": {
+        "active": true,
+        "device": "lo",
+        "ipv4": {
+            "address": "127.0.0.1",
+            "broadcast": "",
+            "netmask": "255.0.0.0",
+            "network": "127.0.0.0"
+        },
+        "ipv6": [
+            {
+                "address": "::1",
+                "prefix": "128",
+                "scope": "host"
+            }
+        ],
+        "mtu": 65536,
+        "promisc": false,
+        "type": "loopback"
+    },
+    "ansible_local": {},
+    "ansible_lsb": {},
+    "ansible_machine": "x86_64",
+    "ansible_memfree_mb": 177,
+    "ansible_memory_mb": {
+        "nocache": {
+            "free": 1081,
+            "used": 907
+        },
+        "real": {
+            "free": 177,
+            "total": 1988,
+            "used": 1811
+        },
+        "swap": {
+            "cached": 0,
+            "free": 0,
+            "total": 0,
+            "used": 0
+        }
+    },
+    "ansible_memtotal_mb": 1988,
+    "ansible_mounts": [
+        {
+            "block_available": 675628,
+            "block_size": 4096,
+            "block_total": 2796610,
+            "block_used": 2120982,
+            "device": "/dev/sda3",
+            "fstype": "ext4",
+            "inode_available": 614590,
+            "inode_total": 712448,
+            "inode_used": 97858,
+            "mount": "/",
+            "options": "rw,relatime",
+            "size_available": 2767372288,
+            "size_total": 11454914560,
+            "uuid": "N/A"
+        },
+        {
+            "block_available": 64147,
+            "block_size": 1024,
+            "block_total": 95054,
+            "block_used": 30907,
+            "device": "/dev/sda1",
+            "fstype": "ext4",
+            "inode_available": 25664,
+            "inode_total": 25688,
+            "inode_used": 24,
+            "mount": "/boot",
+            "options": "rw,relatime",
+            "size_available": 65686528,
+            "size_total": 97335296,
+            "uuid": "N/A"
+        }
+    ],
+    "ansible_nodename": "wanderduene",
+    "ansible_os_family": "Alpine",
+    "ansible_pkg_mgr": "apk",
+    "ansible_proc_cmdline": {
+        "BOOT_IMAGE": "vmlinuz-lts",
+        "initrd": "initramfs-lts",
+        "modules": "sd-mod,usb-storage,ext4",
+        "nomodeset": true,
+        "quiet": true,
+        "root": "UUID=fc06e9aa-37fc-45ab-ad89-4f04e8ed78ba",
+        "rootfstype": "ext4"
+    },
+    "ansible_processor": [
+        "0",
+        "GenuineIntel",
+        "QEMU Virtual CPU version 2.5+",
+        "1",
+        "GenuineIntel",
+        "QEMU Virtual CPU version 2.5+"
+    ],
+    "ansible_processor_cores": 2,
+    "ansible_processor_count": 1,
+    "ansible_processor_nproc": 2,
+    "ansible_processor_threads_per_core": 1,
+    "ansible_processor_vcpus": 2,
+    "ansible_product_name": "Standard PC (i440FX + PIIX, 1996)",
+    "ansible_product_serial": "NA",
+    "ansible_product_uuid": "094f608f-d195-475c-af30-46f6b563fb17",
+    "ansible_product_version": "pc-i440fx-4.2",
+    "ansible_python": {
+        "executable": "/usr/bin/python3",
+        "has_sslcontext": true,
+        "type": "cpython",
+        "version": {
+            "major": 3,
+            "micro": 6,
+            "minor": 8,
+            "releaselevel": "final",
+            "serial": 0
+        },
+        "version_info": [
+            3,
+            8,
+            6,
+            "final",
+            0
+        ]
+    },
+    "ansible_python_version": "3.8.6",
+    "ansible_real_group_id": 0,
+    "ansible_real_user_id": 0,
+    "ansible_selinux": {
+        "status": "Missing selinux Python library"
+    },
+    "ansible_selinux_python_present": false,
+    "ansible_service_mgr": "openrc",
+    "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBALPM9MD+vWd/Im1NZmZ7yDETUh4ADpCXiR8nnPciFXjKUR4x7RY9BFfFrYY4GrfzE+P4Nn+YZUinp0t0fbaQZh7wni6SHi3z0iDTePzc09lM4aA3jI74ZzH7U26WSV4wrYnSMfnLGdn9NM22O33iBnjKBHTeBwt5VGQcy94fiL9nAAAAFQDa0Xm057SG0xhwOv1qmiiAnUSguQAAAIEApViV8M49bADkCxICjyxSQ0wnuqIMTH6bKnboiVDmAOVsgoijRx0xzVduDwV+BQGZ+GLt8P4eC93T2geKi3EymS94y381x5Pxe8n4MO/OzzUepwHiQMjvDI9uLJJgW52Vmy7OtwePyyHuTXBryrbYLgHZlzzEMT2g7AjwqW6IsGAAAACALrM3Tjb3yo+0K1WaEQWncEYnA5/O+8BpBP1kE97xz/Ow46iP5+OQQ6eKIgxwMNwBgPnYM7XXOkTktV77k+iCRJ872DV3+YVrhwrlzxKmkMe//PwWIoOaglt2RFULCuEZLReGSgdv+FFkWidi1ZcooqvkZJ4FASTB6rLB7SQc5dQ=",
+    "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss",
+    "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFRE/MKb9N/Fo3ZPi1Eo/ZLc2H6Mkby4OWK44ka5ktcGxlUZabMr5F/6DQMZVeHBppMdLsDIYBeGPs+LWcQw3z0=",
+    "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256",
+    "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAIEEi+5CCc5eYZ63Ssi5pyL/nQw+m69Q023/Vd8c7RjpH",
+    "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519",
+    "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQC92RiW1B5qG7hkESJJTgi0HbCk8rKnIz+aK1v6u+Tp0dFFYcDJwaEmcQhOoTLaQFoUhA4IJaqBNbTD3PfOSuOYO69RhV6WilfyMptEb/h8i+OyI77pcIN/zAh5JkNe2Mi7mr756/086zOROIw4jK02YU4a1bNo/u6rAKWKe/WURRLPtTTFbxkG11fBBgeXpgaOwUTmBvcI+W60elVHQ6zxKe7mH2rWfNjvc0uGqiPJkGVhRuwz+eXE2UqLL6JcbQ1gMXK4dKfvLX7XHRgNuPWO9AAPra3AbEoTNFRym6m88ZFMsUWQvFcP5te0K2Vq/UrZDosv0rptY8IxSz37Ru9yQr+cY4dY2E/oDXd4gvKvMHQ6D1zxJNSd4x+hBHsjkofgAecWEEGaxj6zPLCt8X7HmVgsy3sqhEq0My4fGnvjOrb7MRihA8ofrqUiyCqRgYXzG4tDU/q9SPEAKxBIO/15S1mQXac8I8Lt5m0p5XIp8m2rL2a4LqwW2+ag2ToeMg8=",
+    "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa",
+    "ansible_swapfree_mb": 0,
+    "ansible_swaptotal_mb": 0,
+    "ansible_system": "Linux",
+    "ansible_system_capabilities": [],
+    "ansible_system_capabilities_enforced": "False",
+    "ansible_system_vendor": "QEMU",
+    "ansible_uptime_seconds": 157044,
+    "ansible_user_dir": "/root",
+    "ansible_user_gecos": "root",
+    "ansible_user_gid": 0,
+    "ansible_user_id": "root",
+    "ansible_user_shell": "/bin/ash",
+    "ansible_user_uid": 0,
+    "ansible_userspace_architecture": "x86_64",
+    "ansible_userspace_bits": "64",
+    "ansible_virtualization_role": "guest",
+    "ansible_virtualization_type": "kvm",
+    "discovered_interpreter_python": "/usr/bin/python3",
+    "gather_subset": [
+        "all"
+    ],
+    "module_setup": true
+}+
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
diff --git a/_playbook-router-alpine.yml b/_playbook-router-alpine.yml
@@ -0,0 +1,110 @@
+---
+- hosts: all
+  remote_user: root
+  gather_facts: false
+  tasks:
+    - name: "[Alpine] Install Python"
+      raw: test -e /usr/bin/python || (test -e /sbin/apk && apk update && apk add python3; true)
+    - name: "[Archlinux] Install Python"
+      raw: test -e /usr/bin/python || (test -e /usr/bin/pacman && pacman -Sy --noconfirm python; true)
+
+- hosts: lollo
+  name: Install lollo
+  roles:
+    - common
+    - dnsmasq
+    - hostapd
+    - syncthing
+    - frp
+  vars:
+    alpineVersion: v3.12
+    users:
+      - name: leah
+        groups: "wheel"
+        password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
+        sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
+    network:
+      hostname: lollo
+      domain: ctu.cx
+      nameservers:
+        - 1.1.1.1
+        - 8.8.8.8
+      useFerm: true
+      fermConfig: config-files/ferm/ferm-lollo.conf
+      useAwall: false
+      vlanSupport: true
+      natSupport: true
+      bridgeSupport: true
+      interfaces:
+        - name: lo
+          loopback: true
+        - name: eth0
+          ipv4:
+            dhcp: true
+          ipv6:
+            stateless: true
+        - name: eth0.5
+          manual: true
+        - name: brlan0
+          bridge_ports: eth0.5
+          bridge_stp: false
+          ipv4:
+            addresses:
+              - 10.0.0.1/24
+              - 195.39.246.32/28
+          ipv6:
+            address: 2a0f:4ac0:acab::1/48
+
+    hostapd:
+      interface: wlp3s0
+      bridge: brlan
+      channel: 1
+      ssid: legacy.home.ctu.cx
+      passphrase: "{{ lookup('community.general.passwordstore', 'WiFi/legacy.home.ctu.cx returnall=true')}}"
+
+    dnsmasq:
+      wan_interface: enp2s0
+      local_service: true
+      no_resolv: true
+      domain_needed: true
+      bogus_priv: true
+      expand_hosts: true
+      read_ethers: true
+      enable_ra: true
+      quiet_ra: true
+      domain: home.ctu.cx
+      dns_servers:
+        - 1.1.1.1
+        - 1.0.0.1
+        - 8.8.8.8
+        - 8.8.4.4
+      dhcp:
+        authoritative: true
+        rapid_commit: true
+        sequential_ip: true
+        options:
+          - option6:information-refresh-time,6h
+          - option:dns-server,10.0.0.1
+          - option:router,10.0.0.1
+        ranges:
+          - 195.39.246.33, 195.39.246.42, 255.255.255.240, 48h
+          - 10.0.0.40,     10.0.0.253,    255.255.255.0,   48h
+          - 2a0f:4ac0:acab::, ra-names, 48h
+
+    syncthing:
+      user: leah
+      guiAddress: 0.0.0.0:8384
+      reverseProxy:
+        enable: false
+
+    frpc:
+      serverAddress: wanderduene.ctu.cx
+      serverPort: 5050
+      token: "{{ lookup('community.general.passwordstore', 'server/wanderduene/frps/token returnall=true')}}"
+      dashboard: false
+      tunnels:
+        - name: lollo-ssh
+          type: tcp
+          local_ip: 127.0.0.1
+          local_port: 22
+          remote_port: 2202
diff --git a/alpine/.DS_Store b/alpine/.DS_Store  Binary files differ.
diff --git a/alpine/config-files/acme-redirect/acme-redirect.conf b/alpine/config-files/acme-redirect/acme-redirect.conf
@@ -1,3 +0,0 @@
-[acme]
-acme_email = "lets-encrypt@ctu.cx"
-acme_url   = "https://api.buypass.com/acme/directory"
diff --git a/alpine/config-files/awall/custom-services.json b/alpine/config-files/awall/custom-services.json
@@ -1,10 +0,0 @@
-{
-	"service": {
-		"submissions": [
-			{ "proto": "tcp", "port": 465 }
-		],
-		"frps": [
-			{ "proto": "tcp", "port": 5050 }
-		]
-	}
-}
diff --git a/alpine/config-files/awall/dns.json b/alpine/config-files/awall/dns.json
@@ -1,13 +0,0 @@
-{
-  "description": "Allow DNS on WAN",
-  "import": [ "base" ],
-
-  "filter": [
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "dns",
-      "action": "accept"
-    }
-  ]
-}
diff --git a/alpine/config-files/awall/frps.json b/alpine/config-files/awall/frps.json
@@ -1,13 +0,0 @@
-{
-  "description": "Allow FRPS on WAN",
-  "import": [ "base" ],
-
-  "filter": [
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "frps",
-      "action": "accept"
-    }
-  ]
-}
diff --git a/alpine/config-files/awall/mail.json b/alpine/config-files/awall/mail.json
@@ -1,37 +0,0 @@
-{
-  "description": "Allow mail specific ports on WAN",
-  "import": [ "base" ],
-
-  "filter": [
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "smtp",
-      "action": "accept"
-    },
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "submissions",
-      "action": "accept"
-    },
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "submission",
-      "action": "accept"
-    },
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "imap",
-      "action": "accept"
-    },
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "imaps",
-      "action": "accept"
-    }
-  ]
-}
diff --git a/alpine/config-files/awall/web.json b/alpine/config-files/awall/web.json
@@ -1,19 +0,0 @@
-{
-  "description": "Allow HTTP(S) on WAN",
-  "import": [ "base" ],
-
-  "filter": [
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "http",
-      "action": "accept"
-    },
-    {
-      "in": "WAN",
-      "out": "_fw",
-      "service": "https",
-      "action": "accept"
-    }
-  ]
-}
diff --git a/alpine/config-files/cgit/cgitrc b/alpine/config-files/cgit/cgitrc
@@ -1,59 +0,0 @@
-css=/cgit-ctucx.css
-logo=/cgit.png
-virtual-root=/
-
-root-title=ctucx.cgit
-root-desc=my personal git repos
-
-local-time=1
-
-cache-size=30
-
-readme=:README.md
-readme=:readme.md
-readme=:README.mkd
-readme=:readme.mkd
-readme=:README.rst
-readme=:readme.rst
-readme=:README.html
-readme=:readme.html
-readme=:README.htm
-readme=:readme.htm
-readme=:README.txt
-readme=:readme.txt
-readme=:README
-readme=:readme
-
-about-filter=/usr/lib/cgit/filters/about-formatting.sh
-source-filter=/usr/lib/cgit/filters/syntax-highlighting.py
-
-snapshots=tar.gz tar.bz2 zip
-
-max-stats=quarter
-
-clone-url=http://cgit.ctu.cx/$CGIT_REPO_URL git@wanderduene.ctu.cx:$CGIT_REPO_URL
-
-enable-commit-graph=1
-
-enable-index-links=1
-enable-index-owner=0
-
-enable-blame=1
-
-enable-log-filecount=1
-enable-log-linecount=1
-
-enable-http-clone=1
-enable-git-config=1
-
-mimetype.gif=image/gif
-mimetype.html=text/html
-mimetype.jpg=image/jpeg
-mimetype.jpeg=image/jpeg
-mimetype.pdf=application/pdf
-mimetype.png=image/png
-mimetype.svg=image/svg+xml
-
-remove-suffix=1
-project-list=/var/lib/git/projects.list
-scan-path=/var/lib/git/repositories
diff --git a/alpine/config-files/ferm/ferm-lollo.conf b/alpine/config-files/ferm/ferm-lollo.conf
@@ -1,95 +0,0 @@
-# -*- shell-script -*-
-#
-# Ferm example script
-#
-# Firewall configuration for a router with a dynamic IP.
-#
-# Author: Max Kellermann <max@duempel.org>
-#
-
-@def $DEV_LAN = brlan0;
-@def $DEV_WAN = eth0;
-
-@def $NET_LAN = 10.0.0.0/24;
-
-# globally accessible services
-@def $WAN_TCP = ( 22 );
-@def $WAN_UDP = ( 1194 );
-# ( ssh )
-# ( wireguard )
-
-# locally accessible services
-@def $LAN_TCP = ( 53 22 );
-@def $LAN_UDP = ( 53 67 69 123 );
-# ( dns ssh )
-# ( dns dhcp tftp ntp )
-
-# generic input and forwarding rules for ipv4 and ipv6
-domain (ip ip6) {
-    table filter {
-        chain INPUT {
-            policy DROP;
-
-            # connection tracking
-            mod state state INVALID DROP;
-            mod state state (ESTABLISHED RELATED) ACCEPT;
-
-            # allow local connections
-            interface lo ACCEPT;
-
-            # respond to ping
-        proto ipv6-icmp icmpv6-type redirect DROP;
-        proto ipv6-icmp icmpv6-type 139 DROP;
-        proto ipv6-icmp ACCEPT;
-            proto icmp ACCEPT;
-
-            # local services
-            interface ! $DEV_WAN {
-                proto tcp dport $LAN_TCP ACCEPT;
-                proto udp mod multiport destination-ports $LAN_UDP ACCEPT;
-            }
-
-            proto tcp dport $WAN_TCP ACCEPT;
-            proto udp dport $WAN_UDP ACCEPT;
-        }
-
-        # outgoing connections are not limited
-        chain OUTPUT policy ACCEPT;
-
-        chain FORWARD {
-            policy DROP;
-
-            # connection tracking
-            mod state state INVALID DROP;
-            mod state state (ESTABLISHED RELATED) ACCEPT;
-
-            # local clients can do whatever
-            interface $DEV_LAN ACCEPT;
-
-
-            proto icmp ACCEPT;
-
-            mod conntrack ctstate DNAT ACCEPT;
-
-            # the rest is dropped by the above policy
-        }
-    }
-}
-
-# nat only for ipv4
-domain ip {
-    table nat {
-        chain PREROUTING {
-            policy ACCEPT;
-
-            # port forwards, ala daddr $WAN_IP dport 65522 DNAT to 192.168.0.2:22;
-        }
-
-        chain POSTROUTING {
-            policy ACCEPT;
-
-            outerface $DEV_WAN MASQUERADE;
-            saddr $NET_LAN mod conntrack ctstate DNAT MASQUERADE; # needle point loopback
-        }
-    }
-}-
\ No newline at end of file
diff --git a/alpine/config-files/grafana/grafana.ini b/alpine/config-files/grafana/grafana.ini
@@ -1,818 +0,0 @@
-##################### Grafana Configuration Example #####################
-#
-# Everything has defaults so you only need to uncomment things you want to
-# change
-
-# possible values : production, development
-app_mode = production
-
-# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
-instance_name = ctucx.grafana
-
-#################################### Paths ####################################
-[paths]
-# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
-;data = /var/lib/grafana
-
-# Temporary files in `data` directory older than given duration will be removed
-;temp_data_lifetime = 24h
-
-# Directory where grafana can store logs
-;logs = /var/log/grafana
-
-# Directory where grafana will automatically scan and look for plugins
-;plugins = /var/lib/grafana/plugins
-
-# folder that contains provisioning config files that grafana will apply on startup and while running.
-;provisioning = /etc/grafana/provisioning
-
-#################################### Server ####################################
-[server]
-# Protocol (http, https, h2, socket)
-;protocol = http
-
-# The ip address to bind to, empty will bind to all interfaces
-http_addr = 127.0.0.1
-
-# The http port  to use
-;http_port = 3000
-
-# The public facing domain name used to access grafana from a browser
-domain = grafana.ctu.cx
-
-# Redirect to correct domain if host header does not match domain
-# Prevents DNS rebinding attacks
-;enforce_domain = false
-
-# The full public facing url you use in browser, used for redirects and emails
-# If you use reverse proxy and sub path specify full url (with sub path)
-;root_url = %(protocol)s://%(domain)s:%(http_port)s/
-
-# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
-;serve_from_sub_path = false
-
-# Log web requests
-;router_logging = false
-
-# the path relative working path
-;static_root_path = public
-
-# enable gzip
-;enable_gzip = false
-
-# https certs & key file
-;cert_file =
-;cert_key =
-
-# Unix socket path
-;socket =
-
-#################################### Database ####################################
-[database]
-# You can configure the database connection by specifying type, host, name, user and password
-# as separate properties or as on string using the url properties.
-
-# Either "mysql", "postgres" or "sqlite3", it's your choice
-;type = sqlite3
-;host = 127.0.0.1:3306
-;name = grafana
-;user = root
-# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
-;password =
-
-# Use either URL or the previous fields to configure the database
-# Example: mysql://user:secret@host:port/database
-;url =
-
-# For "postgres" only, either "disable", "require" or "verify-full"
-;ssl_mode = disable
-
-;ca_cert_path =
-;client_key_path =
-;client_cert_path =
-;server_cert_name =
-
-# For "sqlite3" only, path relative to data_path setting
-;path = grafana.db
-
-# Max idle conn setting default is 2
-;max_idle_conn = 2
-
-# Max conn setting default is 0 (mean not set)
-;max_open_conn =
-
-# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
-;conn_max_lifetime = 14400
-
-# Set to true to log the sql calls and execution times.
-;log_queries =
-
-# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
-;cache_mode = private
-
-#################################### Cache server #############################
-[remote_cache]
-# Either "redis", "memcached" or "database" default is "database"
-;type = database
-
-# cache connectionstring options
-# database: will use Grafana primary database.
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
-# memcache: 127.0.0.1:11211
-;connstr =
-
-#################################### Data proxy ###########################
-[dataproxy]
-
-# This enables data proxy logging, default is false
-;logging = false
-
-# How long the data proxy waits before timing out, default is 30 seconds.
-# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
-;timeout = 30
-
-# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
-;send_user_header = false
-
-#################################### Analytics ####################################
-[analytics]
-# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
-# No ip addresses are being tracked, only simple counters to track
-# running instances, dashboard and error counts. It is very helpful to us.
-# Change this option to false to disable reporting.
-;reporting_enabled = true
-
-# Set to false to disable all checks to https://grafana.net
-# for new versions (grafana itself and plugins), check is used
-# in some UI views to notify that grafana or plugin update exists
-# This option does not cause any auto updates, nor send any information
-# only a GET request to http://grafana.com to get latest versions
-;check_for_updates = true
-
-# Google Analytics universal tracking code, only enabled if you specify an id here
-;google_analytics_ua_id =
-
-# Google Tag Manager ID, only enabled if you specify an id here
-;google_tag_manager_id =
-
-#################################### Security ####################################
-[security]
-# disable creation of admin user on first start of grafana
-;disable_initial_admin_creation = true
-
-# default admin user, created on startup
-;admin_user = admin
-
-# default admin password, can be changed before first start of grafana,  or in profile settings
-;admin_password = admin
-
-# used for signing
-;secret_key = SW2YcwTIb9zpOOhoPsMm
-
-# disable gravatar profile images
-disable_gravatar = true
-
-# data source proxy whitelist (ip_or_domain:port separated by spaces)
-;data_source_proxy_whitelist =
-
-# disable protection against brute force login attempts
-;disable_brute_force_login_protection = false
-
-# set to true if you host Grafana behind HTTPS. default is false.
-;cookie_secure = false
-
-# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
-;cookie_samesite = lax
-
-# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
-;allow_embedding = false
-
-# Set to true if you want to enable http strict transport security (HSTS) response header.
-# This is only sent when HTTPS is enabled in this configuration.
-# HSTS tells browsers that the site should only be accessed using HTTPS.
-;strict_transport_security = false
-
-# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
-;strict_transport_security_max_age_seconds = 86400
-
-# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
-;strict_transport_security_preload = false
-
-# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
-;strict_transport_security_subdomains = false
-
-# Set to true to enable the X-Content-Type-Options response header.
-# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
-# in the Content-Type headers should not be changed and be followed.
-;x_content_type_options = true
-
-# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
-# when they detect reflected cross-site scripting (XSS) attacks.
-;x_xss_protection = true
-
-#################################### Snapshots ###########################
-[snapshots]
-# snapshot sharing options
-;external_enabled = true
-;external_snapshot_url = https://snapshots-origin.raintank.io
-;external_snapshot_name = Publish to snapshot.raintank.io
-
-# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
-# creating and deleting snapshots.
-;public_mode = false
-
-# remove expired snapshot
-;snapshot_remove_expired = true
-
-#################################### Dashboards History ##################
-[dashboards]
-# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
-;versions_to_keep = 20
-
-# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
-# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
-;min_refresh_interval = 5s
-
-# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
-;default_home_dashboard_path =
-
-#################################### Users ###############################
-[users]
-# disable user signup / registration
-allow_sign_up = false
-
-# Allow non admin users to create organizations
-allow_org_create = false
-
-# Set to true to automatically assign new users to the default organization (id 1)
-;auto_assign_org = true
-
-# Set this value to automatically add new users to the provided organization (if auto_assign_org above is set to true)
-;auto_assign_org_id = 1
-
-# Default role new users will be automatically assigned (if disabled above is set to true)
-;auto_assign_org_role = Viewer
-
-# Require email validation before sign up completes
-;verify_email_enabled = false
-
-# Background text for the user field on the login page
-;login_hint = email or username
-;password_hint = password
-
-# Default UI theme ("dark" or "light")
-;default_theme = dark
-
-# External user management, these options affect the organization users view
-;external_manage_link_url =
-;external_manage_link_name =
-;external_manage_info =
-
-# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
-viewers_can_edit = true
-
-# Editors can administrate dashboard, folders and teams they create
-;editors_can_admin = false
-
-[auth]
-# Login cookie name
-;login_cookie_name = grafana_session
-
-# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation
-;login_maximum_inactive_lifetime_duration =
-
-# The maximum lifetime (duration) an authenticated user can be logged in since login time before being required to login. Default is 30 days (30d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month).
-;login_maximum_lifetime_duration =
-
-# How often should auth tokens be rotated for authenticated users when being active. The default is each 10 minutes.
-;token_rotation_interval_minutes = 10
-
-# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
-disable_login_form = true
-
-# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
-;disable_signout_menu = false
-
-# URL to redirect the user to after sign out
-;signout_redirect_url =
-
-# Set to true to attempt login with OAuth automatically, skipping the login screen.
-# This setting is ignored if multiple OAuth providers are configured.
-;oauth_auto_login = false
-
-# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
-;oauth_state_cookie_max_age = 600
-
-# limit of api_key seconds to live before expiration
-;api_key_max_seconds_to_live = -1
-
-#################################### Anonymous Auth ######################
-[auth.anonymous]
-# enable anonymous access
-enabled = true
-
-# specify organization name that should be used for unauthenticated users
-org_name = Main Org.
-
-# specify role for unauthenticated users
-org_role = Viewer
-
-# mask the Grafana version number for unauthenticated users
-hide_version = true
-
-#################################### GitHub Auth ##########################
-[auth.github]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email,read:org
-;auth_url = https://github.com/login/oauth/authorize
-;token_url = https://github.com/login/oauth/access_token
-;api_url = https://api.github.com/user
-;allowed_domains =
-;team_ids =
-;allowed_organizations =
-
-#################################### GitLab Auth #########################
-[auth.gitlab]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = api
-;auth_url = https://gitlab.com/oauth/authorize
-;token_url = https://gitlab.com/oauth/token
-;api_url = https://gitlab.com/api/v4
-;allowed_domains =
-;allowed_groups =
-
-#################################### Google Auth ##########################
-[auth.google]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_client_id
-;client_secret = some_client_secret
-;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
-;auth_url = https://accounts.google.com/o/oauth2/auth
-;token_url = https://accounts.google.com/o/oauth2/token
-;api_url = https://www.googleapis.com/oauth2/v1/userinfo
-;allowed_domains =
-;hosted_domain =
-
-#################################### Grafana.com Auth ####################
-[auth.grafana_com]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email
-;allowed_organizations =
-
-#################################### Azure AD OAuth #######################
-[auth.azuread]
-;name = Azure AD
-;enabled = false
-;allow_sign_up = true
-;client_id = some_client_id
-;client_secret = some_client_secret
-;scopes = openid email profile
-;auth_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/authorize
-;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
-;allowed_domains =
-;allowed_groups =
-
-#################################### Okta OAuth #######################
-[auth.okta]
-;name = Okta
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = openid profile email groups
-;auth_url = https://<tenant-id>.okta.com/oauth2/v1/authorize
-;token_url = https://<tenant-id>.okta.com/oauth2/v1/token
-;api_url = https://<tenant-id>.okta.com/oauth2/v1/userinfo
-;allowed_domains =
-;allowed_groups =
-;role_attribute_path =
-
-#################################### Generic OAuth ##########################
-[auth.generic_oauth]
-;enabled = false
-;name = OAuth
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email,read:org
-;email_attribute_name = email:primary
-;email_attribute_path =
-;login_attribute_path =
-;id_token_attribute_name =
-;auth_url = https://foo.bar/login/oauth/authorize
-;token_url = https://foo.bar/login/oauth/access_token
-;api_url = https://foo.bar/user
-;allowed_domains =
-;team_ids =
-;allowed_organizations =
-;role_attribute_path =
-;tls_skip_verify_insecure = false
-;tls_client_cert =
-;tls_client_key =
-;tls_client_ca =
-
-#################################### Basic Auth ##########################
-[auth.basic]
-;enabled = true
-
-#################################### Auth Proxy ##########################
-[auth.proxy]
-;enabled = false
-;header_name = X-WEBAUTH-USER
-;header_property = username
-;auto_sign_up = true
-;sync_ttl = 60
-;whitelist = 192.168.1.1, 192.168.2.1
-;headers = Email:X-User-Email, Name:X-User-Name
-# Read the auth proxy docs for details on what the setting below enables
-;enable_login_token = false
-
-#################################### Auth LDAP ##########################
-[auth.ldap]
-;enabled = false
-;config_file = /etc/grafana/ldap.toml
-;allow_sign_up = true
-
-# LDAP backround sync (Enterprise only)
-# At 1 am every day
-;sync_cron = "0 0 1 * * *"
-;active_sync_enabled = true
-
-#################################### SMTP / Emailing ##########################
-[smtp]
-;enabled = false
-;host = localhost:25
-;user =
-# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
-;password =
-;cert_file =
-;key_file =
-;skip_verify = false
-;from_address = admin@grafana.localhost
-;from_name = Grafana
-# EHLO identity in SMTP dialog (defaults to instance_name)
-;ehlo_identity = dashboard.example.com
-# SMTP startTLS policy (defaults to 'OpportunisticStartTLS')
-;startTLS_policy = NoStartTLS
-
-[emails]
-;welcome_email_on_sign_up = false
-;templates_pattern = emails/*.html
-
-#################################### Logging ##########################
-[log]
-# Either "console", "file", "syslog". Default is console and  file
-# Use space to separate multiple modes, e.g. "console file"
-;mode = console file
-
-# Either "debug", "info", "warn", "error", "critical", default is "info"
-;level = info
-
-# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
-;filters =
-
-# For "console" mode only
-[log.console]
-;level =
-
-# log line format, valid options are text, console and json
-;format = console
-
-# For "file" mode only
-[log.file]
-;level =
-
-# log line format, valid options are text, console and json
-;format = text
-
-# This enables automated log rotate(switch of following options), default is true
-;log_rotate = true
-
-# Max line number of single file, default is 1000000
-;max_lines = 1000000
-
-# Max size shift of single file, default is 28 means 1 << 28, 256MB
-;max_size_shift = 28
-
-# Segment log daily, default is true
-;daily_rotate = true
-
-# Expired days of log file(delete after max days), default is 7
-;max_days = 7
-
-[log.syslog]
-;level =
-
-# log line format, valid options are text, console and json
-;format = text
-
-# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
-;network =
-;address =
-
-# Syslog facility. user, daemon and local0 through local7 are valid.
-;facility =
-
-# Syslog tag. By default, the process' argv[0] is used.
-;tag =
-
-#################################### Usage Quotas ########################
-[quota]
-; enabled = false
-
-#### set quotas to -1 to make unlimited. ####
-# limit number of users per Org.
-; org_user = 10
-
-# limit number of dashboards per Org.
-; org_dashboard = 100
-
-# limit number of data_sources per Org.
-; org_data_source = 10
-
-# limit number of api_keys per Org.
-; org_api_key = 10
-
-# limit number of orgs a user can create.
-; user_org = 10
-
-# Global limit of users.
-; global_user = -1
-
-# global limit of orgs.
-; global_org = -1
-
-# global limit of dashboards
-; global_dashboard = -1
-
-# global limit of api_keys
-; global_api_key = -1
-
-# global limit on number of logged in users.
-; global_session = -1
-
-#################################### Alerting ############################
-[alerting]
-# Disable alerting engine & UI features
-;enabled = true
-# Makes it possible to turn off alert rule execution but alerting UI is visible
-;execute_alerts = true
-
-# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
-;error_or_timeout = alerting
-
-# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
-;nodata_or_nullvalues = no_data
-
-# Alert notifications can include images, but rendering many images at the same time can overload the server
-# This limit will protect the server from render overloading and make sure notifications are sent out quickly
-;concurrent_render_limit = 5
-
-
-# Default setting for alert calculation timeout. Default value is 30
-;evaluation_timeout_seconds = 30
-
-# Default setting for alert notification timeout. Default value is 30
-;notification_timeout_seconds = 30
-
-# Default setting for max attempts to sending alert notifications. Default value is 3
-;max_attempts = 3
-
-# Makes it possible to enforce a minimal interval between evaluations, to reduce load on the backend
-;min_interval_seconds = 1
-
-# Configures for how long alert annotations are stored. Default is 0, which keeps them forever.
-# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
-;max_annotation_age =
-
-# Configures max number of alert annotations that Grafana stores. Default value is 0, which keeps all alert annotations.
-;max_annotations_to_keep =
-
-#################################### Annotations #########################
-
-[annotations.dashboard]
-# Dashboard annotations means that annotations are associated with the dashboard they are created on.
-
-# Configures how long dashboard annotations are stored. Default is 0, which keeps them forever.
-# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
-;max_age =
-
-# Configures max number of dashboard annotations that Grafana stores. Default value is 0, which keeps all dashboard annotations.
-;max_annotations_to_keep =
-
-[annotations.api]
-# API annotations means that the annotations have been created using the API without any
-# association with a dashboard.
-
-# Configures how long Grafana stores API annotations. Default is 0, which keeps them forever.
-# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
-;max_age =
-
-# Configures max number of API annotations that Grafana keeps. Default value is 0, which keeps all API annotations.
-;max_annotations_to_keep =
-
-#################################### Explore #############################
-[explore]
-# Enable the Explore section
-enabled = false
-
-#################################### Internal Grafana Metrics ##########################
-# Metrics available at HTTP API Url /metrics
-[metrics]
-# Disable / Enable internal metrics
-;enabled           = true
-# Graphite Publish interval
-;interval_seconds  = 10
-# Disable total stats (stat_totals_*) metrics to be generated
-;disable_total_stats = false
-
-#If both are set, basic auth will be required for the metrics endpoint.
-; basic_auth_username =
-; basic_auth_password =
-
-# Send internal metrics to Graphite
-
-[metrics.graphite]
-# Enable by setting the address setting (ex localhost:2003)
-;address =
-;prefix = prod.grafana.%(instance_name)s.
-
-#################################### Grafana.com integration  ##########################
-# Url used to import dashboards directly from Grafana.com
-
-[grafana_com]
-;url = https://grafana.com
-
-#################################### Distributed tracing ############
-[tracing.jaeger]
-# Enable by setting the address sending traces to jaeger (ex localhost:6831)
-;address = localhost:6831
-# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
-;always_included_tag = tag1:value1
-# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
-;sampler_type = const
-# jaeger samplerconfig param
-# for "const" sampler, 0 or 1 for always false/true respectively
-# for "probabilistic" sampler, a probability between 0 and 1
-# for "rateLimiting" sampler, the number of spans per second
-# for "remote" sampler, param is the same as for "probabilistic"
-# and indicates the initial sampling rate before the actual one
-# is received from the mothership
-;sampler_param = 1
-# Whether or not to use Zipkin propagation (x-b3- HTTP headers).
-;zipkin_propagation = false
-# Setting this to true disables shared RPC spans.
-# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
-;disable_shared_zipkin_spans = false
-
-#################################### External image storage ##########################
-[external_image_storage]
-# Used for uploading images to public servers so they can be included in slack/email messages.
-# you can choose between (s3, webdav, gcs, azure_blob, local)
-;provider =
-
-[external_image_storage.s3]
-;endpoint =
-;path_style_access =
-;bucket =
-;region =
-;path =
-;access_key =
-;secret_key =
-
-[external_image_storage.webdav]
-;url =
-;public_url =
-;username =
-;password =
-
-[external_image_storage.gcs]
-;key_file =
-;bucket =
-;path =
-
-[external_image_storage.azure_blob]
-;account_name =
-;account_key =
-;container_name =
-
-[external_image_storage.local]
-# does not require any configuration
-
-[rendering]
-# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer.
-# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable Grafana to render panels and dashboards to PNG-images using HTTP requests to an external service.
-;server_url =
-# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/.
-;callback_url =
-# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
-# which this setting can help protect against by only allowing a certain amount of concurrent requests.
-;concurrent_render_request_limit = 30
-
-[panels]
-# If set to true Grafana will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities.
-;disable_sanitize_html = false
-
-[plugins]
-;enable_alpha = false
-;app_tls_skip_verify_insecure = false
-# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
-;allow_loading_unsigned_plugins =
-
-#################################### Grafana Image Renderer Plugin ##########################
-[plugin.grafana-image-renderer]
-# Instruct headless browser instance to use a default timezone when not provided by Grafana, e.g. when rendering panel image of alert.
-# See ICU’s metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
-# timezone IDs. Fallbacks to TZ environment variable if not set.
-;rendering_timezone =
-
-# Instruct headless browser instance to use a default language when not provided by Grafana, e.g. when rendering panel image of alert.
-# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
-;rendering_language =
-
-# Instruct headless browser instance to use a default device scale factor when not provided by Grafana, e.g. when rendering panel image of alert.
-# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
-;rendering_viewport_device_scale_factor =
-
-# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
-# the security risk it's not recommended to ignore HTTPS errors.
-;rendering_ignore_https_errors =
-
-# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
-# only capture and log error messages. When enabled, debug messages are captured and logged as well.
-# For the verbose information to be included in the Grafana server log you have to adjust the rendering log level to debug, configure
-# [log].filter = rendering:debug.
-;rendering_verbose_logging =
-
-# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
-# Default is false. This can be useful to enable (true) when troubleshooting.
-;rendering_dumpio =
-
-# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
-# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments is separated with comma-character.
-;rendering_args =
-
-# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
-# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
-# compatible with the plugin.
-;rendering_chrome_bin =
-
-# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
-# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
-# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
-;rendering_mode =
-
-# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
-# and will cluster using browser instances.
-# Mode 'context' will cluster using incognito pages.
-;rendering_clustering_mode =
-# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently..
-;rendering_clustering_max_concurrency =
-
-# Limit the maximum viewport width, height and device scale factor that can be requested.
-;rendering_viewport_max_width =
-;rendering_viewport_max_height =
-;rendering_viewport_max_device_scale_factor =
-
-# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
-# a port not in use.
-;grpc_host =
-;grpc_port =
-
-[enterprise]
-# Path to a valid Grafana Enterprise license.jwt file
-;license_path =
-
-[feature_toggles]
-# enable features, separated by spaces
-;enable =
-
-[date_formats]
-# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
-
-# Default system date format used in time range picker and other places where full time is displayed
-;full_date = YYYY-MM-DD HH:mm:ss
-
-# Used by graph and other places where we only show small intervals
-;interval_second = HH:mm:ss
-;interval_minute = HH:mm
-;interval_hour = MM/DD HH:mm
-;interval_day = MM/DD
-;interval_month = YYYY-MM
-;interval_year = YYYY
-
-# Experimental feature
-;use_browser_locale = false
-
-# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
-;default_timezone = browser
diff --git a/alpine/config-files/grafana/provisioning/dashboards/node-stats.json b/alpine/config-files/grafana/provisioning/dashboards/node-stats.json
@@ -1,1434 +0,0 @@
-{
-  "annotations": {
-    "list": [
-      {
-        "builtIn": 1,
-        "datasource": "-- Grafana --",
-        "enable": true,
-        "hide": true,
-        "iconColor": "rgba(0, 211, 255, 1)",
-        "name": "Annotations & Alerts",
-        "type": "dashboard"
-      }
-    ]
-  },
-  "editable": true,
-  "gnetId": null,
-  "graphTooltip": 1,
-  "id": 4,
-  "iteration": 1587588975022,
-  "links": [
-    {
-      "asDropdown": true,
-      "icon": "external link",
-      "tags": [],
-      "title": "",
-      "type": "dashboards"
-    }
-  ],
-  "panels": [
-    {
-      "cacheTimeout": null,
-      "datasource": null,
-      "gridPos": {
-        "h": 4,
-        "w": 12,
-        "x": 0,
-        "y": 0
-      },
-      "id": 28,
-      "links": [],
-      "options": {
-        "fieldOptions": {
-          "calcs": [
-            "last"
-          ],
-          "defaults": {
-            "mappings": [
-              {
-                "id": 0,
-                "op": "=",
-                "text": "up",
-                "type": 1,
-                "value": "1"
-              },
-              {
-                "from": "",
-                "id": 1,
-                "operator": "",
-                "text": "down",
-                "to": "",
-                "type": 1,
-                "value": "0"
-              }
-            ],
-            "max": 1,
-            "min": 0,
-            "nullValueMode": "connected",
-            "thresholds": [
-              {
-                "color": "red",
-                "value": null
-              },
-              {
-                "color": "green",
-                "value": 1
-              }
-            ],
-            "unit": "percentunit"
-          },
-          "override": {},
-          "values": false
-        },
-        "orientation": "vertical",
-        "showThresholdLabels": false,
-        "showThresholdMarkers": false
-      },
-      "pluginVersion": "6.5.2",
-      "targets": [
-        {
-          "expr": "up{instance=~\"$instance\"}",
-          "format": "time_series",
-          "interval": "",
-          "legendFormat": "{{ instance }}",
-          "refId": "A"
-        }
-      ],
-      "timeFrom": null,
-      "timeShift": null,
-      "title": "Status",
-      "type": "gauge"
-    },
-    {
-      "aliasColors": {},
-      "bars": true,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 10,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 12,
-        "y": 0
-      },
-      "hiddenSeries": false,
-      "id": 16,
-      "interval": "",
-      "legend": {
-        "alignAsTable": false,
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": true,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": true,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "count (node_uname_info{instance=~\"$instance\"}) by (release)",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "{{release}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Kernel",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": "",
-          "logBase": 1,
-          "max": "100",
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 18,
-        "y": 0
-      },
-      "hiddenSeries": false,
-      "id": 18,
-      "legend": {
-        "avg": false,
-        "current": true,
-        "hideEmpty": false,
-        "hideZero": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "time()-node_boot_time_seconds{instance=~\"$instance\"}",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Uptime",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "s",
-          "label": "",
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": "",
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "cacheTimeout": null,
-      "datasource": null,
-      "gridPos": {
-        "h": 6,
-        "w": 12,
-        "x": 0,
-        "y": 4
-      },
-      "id": 29,
-      "links": [],
-      "options": {
-        "displayMode": "basic",
-        "fieldOptions": {
-          "calcs": [
-            "mean"
-          ],
-          "defaults": {
-            "mappings": [],
-            "max": 1,
-            "min": 0,
-            "nullValueMode": "connected",
-            "thresholds": [
-              {
-                "color": "red",
-                "value": null
-              },
-              {
-                "color": "yellow",
-                "value": 0.0001
-              },
-              {
-                "color": "green",
-                "value": 0.95
-              }
-            ],
-            "unit": "percentunit"
-          },
-          "override": {},
-          "values": false
-        },
-        "orientation": "vertical"
-      },
-      "pluginVersion": "6.5.2",
-      "targets": [
-        {
-          "expr": "up{instance=~\"$instance\"}",
-          "interval": "",
-          "legendFormat": "{{ instance }}",
-          "refId": "A"
-        }
-      ],
-      "timeFrom": null,
-      "timeShift": null,
-      "title": "Availability",
-      "type": "bargauge"
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 12,
-        "y": 5
-      },
-      "hiddenSeries": false,
-      "id": 12,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "irate(node_cpu_seconds_total{mode=\"user\",instance=~\"$instance\"}[2m])",
-          "format": "time_series",
-          "instant": false,
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} core{{cpu}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "CPU Load",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "percentunit",
-          "label": null,
-          "logBase": 1,
-          "max": "1",
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": "Prometheus",
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 18,
-        "y": 5
-      },
-      "hiddenSeries": false,
-      "id": 2,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "node_load1{instance=~\"$instance\"}",
-          "format": "time_series",
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} 1 min avg",
-          "refId": "A"
-        },
-        {
-          "expr": "node_load5{instance=~\"$instance\"}",
-          "interval": "",
-          "legendFormat": "{{instance}} 5 min avg",
-          "refId": "B"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Load",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "cacheTimeout": null,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 0,
-        "y": 10
-      },
-      "hiddenSeries": false,
-      "id": 4,
-      "legend": {
-        "avg": false,
-        "current": true,
-        "max": false,
-        "min": false,
-        "rightSide": false,
-        "show": true,
-        "total": false,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [
-        {}
-      ],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "(node_filesystem_size_bytes{device=~\"/dev/.*\",mountpoint!=\"/nix/store\",instance=~\"$instance\"} - node_filesystem_free_bytes{instance=~\"$instance\"}) / node_filesystem_size_bytes{device=~\"/dev/.*\",mountpoint!=\"/nix/store\",instance=~\"$instance\"}",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "interval": "",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} {{mountpoint}} used",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Disk Space",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "decimals": null,
-          "format": "percentunit",
-          "label": null,
-          "logBase": 1,
-          "max": "1",
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 12,
-        "y": 10
-      },
-      "hiddenSeries": false,
-      "id": 6,
-      "legend": {
-        "avg": true,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "(node_memory_MemTotal_bytes{instance=~\"$instance\"} - node_memory_MemAvailable_bytes{instance=~\"$instance\"}) / node_memory_MemTotal_bytes{instance=~\"$instance\"}",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} used",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "RAM Usage",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "percentunit",
-          "label": null,
-          "logBase": 1,
-          "max": "1",
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 0,
-        "y": 18
-      },
-      "hiddenSeries": false,
-      "id": 8,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "max(irate(node_network_receive_bytes_total{instance=~\"$instance\",device!=\"lo\"}[2m])) by (instance)",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} rx",
-          "refId": "B"
-        },
-        {
-          "expr": "max(irate(node_network_transmit_bytes_total{instance=~\"$instance\",device!=\"lo\"}[2m])) by (instance)",
-          "format": "time_series",
-          "hide": false,
-          "instant": false,
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} tx",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Network Traffic",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "Bps",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 12,
-        "y": 18
-      },
-      "hiddenSeries": false,
-      "id": 10,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "node_disk_io_now{instance=~\"$instance\"}",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}} {{device}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Disk IO",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "decimals": 0,
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 0,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 14,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "links": [],
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "paceLength": 10,
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "node_entropy_available_bits{instance=~\"$instance\"}",
-          "format": "time_series",
-          "intervalFactor": 1,
-          "legendFormat": "{{instance}}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Entropy",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "decbits",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 6,
-        "x": 6,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 20,
-      "legend": {
-        "avg": false,
-        "current": true,
-        "hideZero": true,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "(node_memory_SwapTotal_bytes{instance=~\"$instance\"} - node_memory_SwapFree_bytes{instance=~\"$instance\"}) / node_memory_SwapTotal_bytes{instance=~\"$instance\"}",
-          "legendFormat": "{{ instance }}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Swap Usage",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "percentunit",
-          "label": null,
-          "logBase": 1,
-          "max": "1",
-          "min": "0",
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 5,
-        "w": 12,
-        "x": 12,
-        "y": 26
-      },
-      "hiddenSeries": false,
-      "id": 26,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "avg(node_cpu_scaling_frequency_hertz{instance=~\"$instance\"}) by (instance)",
-          "legendFormat": "{{ instance }}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "CPU Frequency",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "hertz",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 0,
-        "y": 31
-      },
-      "hiddenSeries": false,
-      "id": 24,
-      "legend": {
-        "avg": true,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": true
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "max(node_hwmon_curr_amps{instance=~\"$instance\"}) by (instance) * max(node_hwmon_in_volts{instance=~\"$instance\"}) by (instance)",
-          "legendFormat": "{{ instance }}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Power Consumption",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "watt",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    },
-    {
-      "aliasColors": {},
-      "bars": false,
-      "dashLength": 10,
-      "dashes": false,
-      "datasource": null,
-      "fill": 1,
-      "fillGradient": 0,
-      "gridPos": {
-        "h": 8,
-        "w": 12,
-        "x": 12,
-        "y": 31
-      },
-      "hiddenSeries": false,
-      "id": 22,
-      "legend": {
-        "avg": false,
-        "current": false,
-        "max": false,
-        "min": false,
-        "show": true,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [],
-      "spaceLength": 10,
-      "stack": false,
-      "steppedLine": false,
-      "targets": [
-        {
-          "expr": "node_hwmon_temp_celsius{instance=~\"$instance\"}",
-          "legendFormat": "{{ instance }} {{ sensor }}",
-          "refId": "A"
-        }
-      ],
-      "thresholds": [],
-      "timeFrom": null,
-      "timeRegions": [],
-      "timeShift": null,
-      "title": "Temperature",
-      "tooltip": {
-        "shared": true,
-        "sort": 0,
-        "value_type": "individual"
-      },
-      "type": "graph",
-      "xaxis": {
-        "buckets": null,
-        "mode": "time",
-        "name": null,
-        "show": true,
-        "values": []
-      },
-      "yaxes": [
-        {
-          "format": "celsius",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        },
-        {
-          "format": "short",
-          "label": null,
-          "logBase": 1,
-          "max": null,
-          "min": null,
-          "show": true
-        }
-      ],
-      "yaxis": {
-        "align": false,
-        "alignLevel": null
-      }
-    }
-  ],
-  "refresh": false,
-  "schemaVersion": 21,
-  "style": "dark",
-  "tags": [
-    "linux",
-    "node_exporter"
-  ],
-  "templating": {
-    "list": [
-      {
-        "allValue": null,
-        "current": {
-          "selected": false,
-          "text": "taurus.ctu.cx:443",
-          "value": "taurus.ctu.cx:443"
-        },
-        "datasource": "Prometheus",
-        "definition": "label_values(node_uname_info, instance)",
-        "hide": 0,
-        "includeAll": false,
-        "label": "Host",
-        "multi": true,
-        "name": "instance",
-        "options": [],
-        "query": "label_values(node_uname_info, instance)",
-        "refresh": 1,
-        "regex": "",
-        "skipUrlSync": false,
-        "sort": 1,
-        "tagValuesQuery": "",
-        "tags": [],
-        "tagsQuery": "",
-        "type": "query",
-        "useTags": false
-      }
-    ]
-  },
-  "time": {
-    "from": "now-6h",
-    "to": "now"
-  },
-  "timepicker": {
-    "refresh_intervals": [
-      "5s",
-      "10s",
-      "30s",
-      "1m",
-      "5m",
-      "15m",
-      "30m",
-      "1h",
-      "2h",
-      "1d"
-    ],
-    "time_options": [
-      "5m",
-      "15m",
-      "1h",
-      "6h",
-      "12h",
-      "24h",
-      "2d",
-      "7d",
-      "30d"
-    ]
-  },
-  "timezone": "",
-  "title": "Node Stats",
-  "uid": "stats",
-  "version": 3
-}-
\ No newline at end of file
diff --git a/alpine/config-files/nginx/nginx.conf b/alpine/config-files/nginx/nginx.conf
@@ -1,52 +0,0 @@
-user nginx;
-
-worker_processes auto;
-
-pcre_jit on;
-
-error_log /var/log/nginx/error.log warn;
-
-include /etc/nginx/modules/*.conf;
-
-
-events {
-	worker_connections 1024;
-}
-
-http {
-	include /etc/nginx/mime.types;
-	default_type application/octet-stream;
-
-	server_tokens off;
-
-	server_names_hash_bucket_size 64;
-
-	client_max_body_size 1m;
-
-	keepalive_timeout 65;
-
-	sendfile on;
-
-	tcp_nodelay on;
-
-	ssl_prefer_server_ciphers on;
-
-	ssl_session_cache shared:SSL:2m;
-
-
-	gzip on;
-
-	gzip_vary on;
-
-	#gzip_static on;
-
-
-	# Specifies the main log format.
-	log_format main '$remote_addr - $remote_user [$time_local] "$request" '
-			'$status $body_bytes_sent "$http_referer" '
-			'"$http_user_agent" "$http_x_forwarded_for"';
-
-	access_log /var/log/nginx/access.log main;
-
-	include /etc/nginx/conf.d/*.conf;
-}
diff --git a/alpine/config-files/nginx/proxy.conf b/alpine/config-files/nginx/proxy.conf
@@ -1,16 +0,0 @@
-#make websockets possible
-proxy_http_version      1.1;
-proxy_set_header        Upgrade $http_upgrade;
-proxy_set_header        Connection "upgrade";
-
-#some headers needed for some software to work
-proxy_redirect          off;
-proxy_connect_timeout   90;
-proxy_send_timeout      90;
-proxy_read_timeout      90;
-proxy_set_header        Host $host;
-proxy_set_header        X-Real-IP $remote_addr;
-proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
-proxy_set_header        X-Forwarded-Proto $scheme;
-proxy_set_header        X-Forwarded-Host $host;
-proxy_set_header        X-Forwarded-Server $host;
diff --git a/alpine/config-files/nginx/ssl.conf b/alpine/config-files/nginx/ssl.conf
@@ -1,21 +0,0 @@
-ssl_session_timeout 1d;
-ssl_session_cache shared:MozSSL:10m;  # about 40000 sessions
-ssl_session_tickets off;
-
-# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
-ssl_dhparam /etc/nginx/dhparam;
-
-# intermediate configuration
-ssl_protocols TLSv1.2 TLSv1.3;
-ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
-ssl_prefer_server_ciphers off;
-
-# HSTS (ngx_http_headers_module is required) (63072000 seconds)
-add_header Strict-Transport-Security "max-age=63072000" always;
-
-# OCSP stapling
-ssl_stapling on;
-ssl_stapling_verify on;
-
-# replace with the IP address of your resolver
-resolver 127.0.0.1;
diff --git a/alpine/config-files/prometheus/prometheus.yml b/alpine/config-files/prometheus/prometheus.yml
@@ -1,42 +0,0 @@
-global:
-  scrape_interval:     20s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-  evaluation_interval: 1m # Evaluate rules every 15 seconds. The default is every 1 minute.
-
-alerting:
-  alertmanagers:
-  - static_configs:
-    - targets:
-      # - alertmanager:9093
-
-
-rule_files:
-  # - "first_rules.yml"
-  # - "second_rules.yml"
-
-scrape_configs:
-  - job_name: 'prometheus'
-    static_configs:
-    - targets: ['localhost:9090']
-
-  - job_name: 'node-exporter'
-    metrics_path: '/node-exporter'
-    scheme: 'https'
-    scrape_interval: 30s
-    static_configs:
-    - targets: [
-      'wanderduene.ctu.cx',
-      'taurus.ctu.cx',
-      'repo.f2k1.de',
-      'toaster.frp.ctu.cx',
-      'stasicontainer-mac.frp.ctu.cx'
-    ]
-
-  - job_name: 'fritzbox-exporter'
-    metrics_path: '/metrics'
-    scheme: 'https'
-    scrape_interval: 30s
-    static_configs:
-    - targets: [
-      'fbexporter.ctu.cx',
-      'fbexporter.f2k1.de'
-    ]
diff --git a/alpine/config-files/rest-server/rest-server.initd b/alpine/config-files/rest-server/rest-server.initd
@@ -1,15 +0,0 @@
-#!/sbin/openrc-run
-supervisor=supervise-daemon
-
-name="rest-server"
-description="Rest Server is a high performance HTTP server that implements restic's REST backend API."
-
-command="/usr/bin/rest-server"
-command_args="--append-only --listen 127.0.0.1:8060 --no-auth --path /var/lib/rest-server --prometheus"
-command_user=leah:leah
-directory="/var/lib/rest-server"
-
-depend() {
-	need net localmount
-	after firewall
-}
diff --git a/alpine/config-files/sudo/sudoers.patch b/alpine/config-files/sudo/sudoers.patch
@@ -1,11 +0,0 @@
---- sudoers
-+++ /etc/sudoers
-@@ -79,7 +79,7 @@
- root ALL=(ALL) ALL
-
- ## Uncomment to allow members of group wheel to execute any command
--# %wheel ALL=(ALL) ALL
-+%wheel ALL=(ALL) ALL
-
- ## Same thing without a password
- # %wheel ALL=(ALL) NOPASSWD: ALL
diff --git a/alpine/config-files/website-vhosts/ctu.cx.conf b/alpine/config-files/website-vhosts/ctu.cx.conf
@@ -1,43 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "/var/lib/acme-redirect/live/ctu.cx/fullchain";
-	ssl_certificate_key "/var/lib/acme-redirect/live/ctu.cx/privkey";
-	include /etc/nginx/ssl.conf;
-	
-	server_name ctu.cx;
-
-	root /var/lib/websites/ctu.cx;
-
-	location /.well-known/host-meta {
-		return 301 https://pleroma.ctu.cx$request_uri;
-	}
-
-	location /.well-known/matrix/client {
-		return 200 '{"m.homeserver": {"base_url": "https://matrix.ctu.cx"}}';
-		add_header Content-Type application/json;
-	}
-
-	location /.well-known/matrix/server {
-		return 200 '{"m.server": "matrix.ctu.cx:443"}';
-		add_header Content-Type application/json;
-	}
-
-	location /vodafone-map {
-		proxy_set_header Accept-Encoding "";
-		proxy_pass https://netmap.vodafone.de/arcgis/rest/services/CoKart/netzabdeckung_mobilfunk_4x/MapServer;
-	}
-
-	location /magenta-at-map {
-		proxy_set_header Accept-Encoding "";
-		proxy_pass https://app.wigeogis.com/kunden/tmobile/data/geoserver.php;
-	}
-
-	location /drei-at-data {
-		proxy_set_header Accept-Encoding "";
-		proxy_pass https://www.drei.at/media/common/netzabdeckung;
-		proxy_hide_header 'access-control-allow-origin';
-		add_header 'access-control-allow-origin' '*';
-	}
-}
diff --git a/alpine/config-files/website-vhosts/photos.ctu.cx.conf b/alpine/config-files/website-vhosts/photos.ctu.cx.conf
@@ -1,20 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "/var/lib/acme-redirect/live/photos.ctu.cx/fullchain";
-	ssl_certificate_key "/var/lib/acme-redirect/live/photos.ctu.cx/privkey";
-	include /etc/nginx/ssl.conf;
-	
-	server_name photos.ctu.cx;
-
-	root /var/lib/websites/photos.ctu.cx;
-
-	location ~* \.(html)$ {
-		add_header Last-Modified $date_gmt;
-		add_header Cache-Control 'private no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
-		if_modified_since off;
-		expires off;
-		etag off;
-	}
-}
diff --git a/alpine/config-files/website-vhosts/repo.f2k1.de.conf b/alpine/config-files/website-vhosts/repo.f2k1.de.conf
@@ -1,15 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "/var/lib/acme-redirect/live/repo.f2k1.de/fullchain";
-	ssl_certificate_key "/var/lib/acme-redirect/live/repo.f2k1.de/privkey";
-	include /etc/nginx/ssl.conf;
-	
-	server_name repo.f2k1.de;
-
-	location / {
-		proxy_set_header Host archrepo.frp.ctu.cx;
-		proxy_pass http://127.0.0.1:8088;
-	}
-}
diff --git a/alpine/inventory b/alpine/inventory
@@ -1,12 +0,0 @@
-[all:vars]
-ansible_ssh_user=root
-
-[taurus]
-taurus.ctu.cx
-
-
-[wanderduene]
-wanderduene.ctu.cx
-
-[lollo]
-10.0.0.1
diff --git a/alpine/playbook-router.yml b/alpine/playbook-router.yml
@@ -1,77 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  gather_facts: false
-  tasks:
-    - name: Install Python
-      raw: test -e /usr/bin/python || (apk update && apk add python3)
-
-- hosts: lollo
-  name: Install lollo
-  roles:
-    - common
-    - dnsmasq
-#    - hostapd
-#    - syncthing
-#    - frp
-  vars:
-    alpineVersion: v3.12
-    users:
-      - name: leah
-        groups: "wheel"
-        password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
-        sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
-    network:
-      hostname: lollo
-      domain: ctu.cx
-      nameservers:
-        - 1.1.1.1
-        - 8.8.8.8
-      useFerm: true
-      useAwall: false
-      vlanSupport:   true
-      natSupport:    true
-      bridgeSupport: true
-      interfaces:
-        - name: lo
-          loopback: true
-        - name: eth0
-          ipv4:
-            dhcp: true
-          ipv6:
-            stateless: true
-        - name: eth0.5
-          manual: true
-        - name: brlan0
-          bridge_ports: eth0.5
-          bridge_stp: false
-          ipv4:
-            addresses:
-             - 10.0.0.1
-             - 195.39.246.40
-            netmask: 255.255.255.0
-          ipv6:
-            address: 2a0f:4ac0:acab::1
-            netmask: 64
-    hostapd:
-      interface: wlan0
-      bridge: brlan0
-      channel: 1
-      ssid: legacy.home.ctu.cx
-      passphrase: wasd1998
-    dnsmasq:
-      dhcp: true
-    syncthing:
-      disableReverseProxy: true
-      guiAddress: 0.0.0.0:8384
-    frpc:
-      serverAddress: wanderduene.ctu.cx
-      serverPort: 5050
-      token: "{{ lookup('community.general.passwordstore', 'server/wanderduene/frps/token returnall=true')}}"
-      dashboard: false
-      tunnels:
-        - name: lollo-ssh
-          type: tcp
-          local_ip: 127.0.0.1
-          local_port: 22
-          remote_port: 2202-
\ No newline at end of file
diff --git a/alpine/playbook-servers.yml b/alpine/playbook-servers.yml
@@ -1,345 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  gather_facts: false
-  tasks:
-    - name: Install Python
-      raw: test -e /usr/bin/python || (apk update && apk add python3)
-
-
-- hosts: wanderduene
-  name:  Install wanderduene
-  roles: 
-#    - common
-#    - bind
-#    - acme-redirect
-#    - nginx
-#    - gitolite
-#    - cgit
-    - oeffisearch
-    - oeffi-web
-#    - maddy
-#    - prometheus
-#    - radicale
-#    - websites
-#    - pleroma
-#    - synapse
-#    - grafana
-#    - frp
-#    - backup 
-  vars:
-    alpineVersion: edge
-    users:
-      - name: leah
-        groups: "wheel"
-        password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
-        sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
-    network:
-      hostname: wanderduene
-      domain: ctu.cx
-      nameservers:
-        - 1.1.1.1
-        - 8.8.8.8
-      useFerm: false
-      useAwall: true
-      awall:
-        zones:
-          WAN:
-            - iface: eth0
-        policys:
-          - in: _fw
-            action: accept
-          - in: _fw
-            out:  WAN
-            action: accept
-          - in: WAN
-            action: drop
-        filters:
-          - in: _fw
-            out: WAN
-            service:
-              - dns
-              - http
-              - https
-              - ssh
-          - in: WAN
-            out: _fw
-            service: 
-              - ping
-            action: accept
-      vlanSupport: false
-      natSupport: false
-      interfaces:
-        - name: lo
-          loopback: true
-        - name: eth0
-          ipv4:
-            address: 46.38.253.139
-            gateway: 46.38.253.1
-            netmask: 255.255.255.0
-          ipv6:
-            address: 2a03:4000:1:45d::1
-            gateway: fe80::1
-            netmask: 64
-    service:
-      bind:
-        domains:
-          - ctu.cx
-          - ctucx.de
-          - thein.ovh
-          - antifa.jetzt
-          - oeffisear.ch
-    acme_redirect_certs:
-      wanderduene.ctu.cx:
-        dns_names: 
-          - wanderduene.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/wanderduene.ctu.cx
-          - sudo rc-service nginx restart
-          - sudo rc-service maddy restart
-      metrics.wanderduene.ctu.cx:
-        dns_names: 
-          - metrics.wanderduene.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/metrics.wanderduene.ctu.cx
-          - sudo rc-service nginx restart
-      ctucx.de:
-        dns_names:
-          - ctucx.de
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/ctucx.de
-          - sudo rc-service nginx restart
-      ctu.cx:
-        dns_names:
-          - ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/ctu.cx
-          - sudo rc-service nginx restart
-      matrix.ctu.cx:
-        dns_names:
-          - matrix.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/matrix.ctu.cx
-          - sudo rc-service nginx restart
-      dav.ctu.cx:
-        dns_names: 
-          - dav.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/dav.ctu.cx
-          - sudo rc-service nginx restart
-      cgit.ctu.cx:
-        dns_names:
-          - cgit.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/cgit.ctu.cx
-          - sudo rc-service nginx restart
-      grafana.ctu.cx:
-        dns_names:
-          - grafana.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/grafana.ctu.cx
-          - sudo rc-service nginx restart
-      pleroma.ctu.cx:
-        dns_names:
-          - pleroma.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/pleroma.ctu.cx
-          - sudo rc-service nginx restart
-      frp.ctu.cx:
-        dns_names:
-          - frp.ctu.cx
-          - stasicontainer-mac.frp.ctu.cx
-          - stasicontainer.frp.ctu.cx
-          - coladose.frp.ctu.cx
-          - toaster.frp.ctu.cx
-          - archrepo.frp.ctu.cx
-          - isa.frp.ctu.cx
-          - isa-mac.frp.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/frp.ctu.cx
-          - sudo rc-service nginx restart
-      oeffi.ctu.cx:
-        dns_names:
-          - oeffi.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/oeffi.ctu.cx
-          - sudo rc-service nginx restart
-      repo.f2k1.de:
-        dns_names:
-          - repo.f2k1.de
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/repo.f2k1.de
-          - sudo rc-service nginx restart
-      oeffisear.ch:
-        dns_names:
-          - oeffisear.ch
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/oeffisear.ch
-          - sudo rc-service nginx restart
-    nginx:
-      ssl_cert: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/privkey"
-    cgit:
-      domain: "cgit.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/cgit.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/cgit.ctu.cx/privkey"
-    oeffisearch:
-      domain: "oeffisear.ch"
-      ssl_cert: "/var/lib/acme-redirect/live/oeffisear.ch/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/oeffisear.ch/privkey"
-    oeffi_web:
-      domain: "oeffi.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/oeffi.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/oeffi.ctu.cx/privkey"
-    maddy:
-      hostname: "wanderduene.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/privkey"
-    prometheus:
-      domain: "metrics.wanderduene.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/metrics.wanderduene.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/metrics.wanderduene.ctu.cx/privkey"
-    radicale:
-      domain: "dav.ctu.cx"
-      users: "{{ lookup('community.general.passwordstore', 'server/wanderduene/radicale.users returnall=true')}}"
-      ssl_cert: "/var/lib/acme-redirect/live/dav.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/dav.ctu.cx/privkey"
-    synapse:
-      domain: "matrix.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/matrix.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/matrix.ctu.cx/privkey"
-    grafana:
-      domain: "grafana.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/grafana.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/grafana.ctu.cx/privkey"
-    pleroma:
-      domain: "pleroma.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/pleroma.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/pleroma.ctu.cx/privkey"
-    frps:
-      token: "{{ lookup('community.general.passwordstore', 'server/wanderduene/frps/token returnall=true')}}"
-      port: 5050
-      ssl_cert: "/var/lib/acme-redirect/live/frp.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/frp.ctu.cx/privkey"
-      vhost_domain: "frp.ctu.cx"
-      vhost_port: 8088
-      vhosts:
-        - stasicontainer-mac
-        - stasicontainer
-        - coladose
-        - toaster
-        - archrepo
-        - isa
-        - isa-mac
-    gitolite_initialKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
-
-
-- hosts: taurus
-  name: Install taurus
-  roles:
-#    - common
-#    - bind
-#    - acme-redirect
-#    - nginx
-#    - syncthing
-#    - websites
-#    - rest-server
-  vars:
-    alpineVersion: edge
-    users:
-      - name: leah
-        groups: "wheel"
-        password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
-        sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
-    network:
-      hostname: taurus
-      domain: ctu.cx
-      nameservers:
-        - 1.1.1.1
-        - 8.8.8.8
-      useFerm: false
-      useAwall: true
-      awall:
-        zones:
-          WAN:
-            - iface: eth0
-        policys:
-          - in: _fw
-            action: accept
-          - in: _fw
-            out:  WAN
-            action: accept
-          - in: WAN
-            action: drop
-        filters:
-          - in: _fw
-            out: WAN
-            service:
-              - dns
-              - http
-              - https
-              - ssh
-          - in: WAN
-            out: _fw
-            service: 
-              - ping
-            action: accept
-      vlanSupport: false
-      natSupport: false
-      interfaces:
-        - name: lo
-          loopback: true
-        - name: eth0
-          ipv4:
-            address: 37.221.196.131
-            gateway: 37.221.196.1
-            netmask: 255.255.255.0
-          ipv6:
-            address: 2a03:4000:9:f8::1
-            gateway: fe80::1
-            netmask: 64
-    service:
-      bind:
-        domains:
-          - ctu.cx
-          - ctucx.de
-          - thein.ovh
-          - antifa.jetzt
-          - oeffisear.ch
-    nginx:
-      ssl_cert: "/var/lib/acme-redirect/live/taurus.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/taurus.ctu.cx/privkey"
-    acme_redirect_certs:
-      taurus.ctu.cx:
-        dns_names: 
-          - taurus.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/taurus.ctu.cx
-          - sudo rc-service nginx restart
-      syncthing.ctu.cx:
-        dns_names: 
-          - syncthing.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/syncthing.ctu.cx
-          - sudo rc-service nginx restart
-      restic.ctu.cx:
-        dns_names: 
-          - restic.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/restic.ctu.cx
-          - sudo rc-service nginx restart
-      photos.ctu.cx:
-        dns_names: 
-          - photos.ctu.cx
-        renew_tasks:
-          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/photo.ctu.cx
-          - sudo rc-service nginx restart
-    syncthing:
-      domain: "syncthing.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/syncthing.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/syncthing.ctu.cx/privkey"
-    rest_server:
-      domain: "restic.ctu.cx"
-      ssl_cert: "/var/lib/acme-redirect/live/restic.ctu.cx/fullchain"
-      ssl_privkey: "/var/lib/acme-redirect/live/restic.ctu.cx/privkey"
-      passwd: "{{ lookup('community.general.passwordstore', 'server/taurus/rest-server.htpasswd returnall=true')}}"
diff --git a/alpine/roles/acme-redirect/tasks/main.yml b/alpine/roles/acme-redirect/tasks/main.yml
@@ -1,77 +0,0 @@
----
-
-- name: "Install package: acme-redirect"
-  apk:
-   name: acme-redirect
-   state: present
-   update_cache: yes
-
-- name: create sudoers file for acme-redirect
-  copy:
-    content: "acme-redirect ALL=NOPASSWD:/sbin/rc-service\n"
-    dest: /etc/sudoers.d/acme-redirect
-
-- name: copy acme-redirect config to destination host
-  copy:
-    src: config-files/acme-redirect/acme-redirect.conf
-    dest: /etc/acme-redirect.conf
-    owner: acme-redirect
-    group: acme-redirect
-
-- name: clean cert-config directory
-  file:
-    state: "{{ item }}"
-    path: /etc/acme-redirect.d
-    owner: acme-redirect
-    group: acme-redirect
-    mode: 0755
-  with_items:
-    - absent
-    - directory
-
-- name: create configs for defined certs
-  template:
-    src: acme-redirect.conf.j2
-    dest: /etc/acme-redirect.d/{{item.key}}.conf
-    owner: acme-redirect
-    group: acme-redirect
-    mode: 0644
-  loop: "{{ lookup('dict', acme_redirect_certs) }}"
-
-- name: Copy http(s) firewall-rule to destination host
-  copy:
-    src: config-files/awall/web.json
-    dest: /etc/awall/optional/web.json
-    validate: jq '.' %s
-
-- name: "Enable firewall-rule for: http, https"
-  awall:
-    name: web
-    state: enabled
-    activate: yes
-
-- name: "Enable and start service: acme-redirect"
-  service:
-   name: acme-redirect
-   enabled: yes
-   state: restarted
-
-- command:
-    cmd: acme-redirect check -q
-  register: acme_check
-  become: yes
-  become_user: acme-redirect
-
-- fail:
-    msg: "Check of Certs failed: {{acme_check.stdout}}"
-  when: acme_check.stdout | length > 0
-
-- command:
-    cmd: acme-redirect renew -q
-  register: acme_renew
-  become: yes
-  become_user: acme-redirect
-
-- fail:
-    msg: "Renew of certs failed: {{acme_renew.stdout}}"
-  when: acme_renew.stdout | length > 0
diff --git a/alpine/roles/acme-redirect/templates/acme-redirect.conf.j2 b/alpine/roles/acme-redirect/templates/acme-redirect.conf.j2
@@ -1,12 +0,0 @@
-[cert]
-name      = "{{item.key}}"
-dns_names = [
-{% for domain in item.value.dns_names %}
-    "{{domain}}",
-{% endfor %}
-]
-exec = [
-{% for task in item.value.renew_tasks %}
-    "{{task}}",
-{% endfor %}
-]
diff --git a/alpine/roles/backup/tasks/main.yml b/alpine/roles/backup/tasks/main.yml
@@ -1,18 +0,0 @@
----
-
-- include: wanderduene.yml
-  when: network.hostname == "wanderduene"
-
-- name: Copy backup-script to server
-  copy:
-    src: scripts/restic-backup-{{network.hostname}}.sh
-    dest: /root/restic-backup.sh
-    mode: 0755
-
-- name: create crontab entry
-  cron:
-    name: "run restic-backups"
-    special_time: daily
-    user: root
-    job: "/root/restic-backup.sh > /dev/null"
-    state: present
diff --git a/alpine/roles/backup/tasks/wanderduene.yml b/alpine/roles/backup/tasks/wanderduene.yml
@@ -1,41 +0,0 @@
----
-
-- name: create password file for rest-server
-  copy:
-    content: "{{ lookup('community.general.passwordstore', 'server/taurus/rest-server.plain returnall=true')}}"
-    dest:    /var/lib/restic-password
-    mode:    0755
-    owner:   root
-    group:   root
-
-- name: create password files for services
-  copy:
-    content: "{{ lookup('community.general.passwordstore', 'server/{{network.hostname}}/restic/{{item}} returnall=true')}}"
-    dest:    "/var/lib/{{item}}/restic-password"
-    owner:   "{{item}}"
-    group:   "{{item}}"
-    mode:    0700
-  loop:
-    - maddy
-    - radicale
-    - git
-    - pleroma
-    - synapse
-    - oeffisearch
-
-- name: create password file for postgresql
-  copy:
-    content: "{{ lookup('community.general.passwordstore', 'server/{{network.hostname}}/restic/postgresql returnall=true')}}"
-    dest:    /var/lib/postgresql/restic-password
-    owner:   postgres
-    group:   postgres
-    mode:    0700
-
-- name: create password file for htmldir
-  copy:
-    content: "{{ lookup('community.general.passwordstore', 'server/{{network.hostname}}/restic/websites returnall=true')}}"
-    dest:    /var/lib/websites/restic-password
-    owner:   leah
-    group:   leah
-    mode:    0700
-
diff --git a/alpine/roles/bind/tasks/main.yml b/alpine/roles/bind/tasks/main.yml
@@ -1,45 +0,0 @@
----
-
-- name: "Install package: bind"
-  apk:
-   name: bind
-   state: present
-   update_cache: yes
-
-- name: clone the dns-zones
-  git:
-    repo: 'https://cgit.ctu.cx/dns-zones'
-    dest: /var/lib/named/zones
-
-- name: change ownership of /var/lib/named
-  file:
-    path: /var/lib/named
-    owner: named
-    group: named
-    state: directory
-    recurse: yes
-
-- name: copy named.conf to destination host
-  template:
-    src: named.conf.j2
-    dest: /etc/bind/named.conf
-    owner:  named
-    group: named
-
-- name: Enable and start bind dns-server
-  service:
-    name: named
-    enabled: yes
-    state: restarted
-
-- name: Copy dns firewall-rule to destination host
-  copy:
-    src: config-files/awall/dns.json
-    dest: /etc/awall/optional/dns.json
-    validate: jq '.' %s
-
-- name: "Activate firewall-rule for: dns"
-  awall:
-    name: dns
-    state: enabled
-    activate: yes
diff --git a/alpine/roles/bind/templates/named.conf.j2 b/alpine/roles/bind/templates/named.conf.j2
@@ -1,13 +0,0 @@
-options { 
-	directory "/var/lib/named"; 
-}; 
-
-{% for domain in service.bind.domains %}
-zone "{{ domain }}" in {
-	type master;
-	file "/var/lib/named/zones/{{ domain }}.zone";
-};
-
-{% endfor %}
-
-controls { };-
\ No newline at end of file
diff --git a/alpine/roles/cgit/tasks/main.yml b/alpine/roles/cgit/tasks/main.yml
@@ -1,55 +0,0 @@
----
-
-- name: "Install Package(s): cgit and it's dependecys"
-  apk:
-   name: cgit git spawn-fcgi fcgiwrap py3-markdown py3-pygments
-   state: present
-   update_cache: yes
-
-- name: "Create fcgi-service for: cgit"
-  file:
-    src: /etc/init.d/spawn-fcgi
-    dest: /etc/init.d/spawn-fcgi.cgit
-    state: link
-
-- name: "Create config for cgit's fcgi-service"
-  copy: 
-    content: "FCGI_PORT=8001\nFCGI_PROGRAM=/usr/bin/fcgiwrap"
-    dest: /etc/conf.d/spawn-fcgi.cgit
-
-- name: "Restart and enable service: spawn-fcgi.cgit"
-  service:
-   name: spawn-fcgi.cgit
-   enabled: yes
-   state: restarted
-
-- name: copy nginx-vhost for cgit to destination host 
-  template: 
-    src: cgit-vhost.conf.j2
-    dest: /etc/nginx/conf.d/cgit.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: copy cgit-config to destination host 
-  copy: 
-    src: config-files/cgit/cgitrc
-    dest: /etc/cgitrc
-    mode: 0644
-
-- name: copy cgit.css to destination host 
-  copy: 
-    src: config-files/cgit/cgit.css
-    dest: /usr/share/webapps/cgit/cgit-ctucx.css
-    mode: 0644
-
-- name: adding user nginx to group git
-  user:
-    name: nginx
-    groups: git
-    append: yes
-
-- name: "Restart service: nginx"
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/cgit/templates/cgit-vhost.conf.j2 b/alpine/roles/cgit/templates/cgit-vhost.conf.j2
@@ -1,21 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{cgit.ssl_cert}}";
-	ssl_certificate_key "{{cgit.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{cgit.domain}};
-
-	root /usr/share/webapps/cgit;
-	try_files $uri @cgit;
-
-	location @cgit {
-		include fastcgi_params;
-		fastcgi_pass localhost:8001;
-		fastcgi_param SCRIPT_FILENAME /usr/share/webapps/cgit/cgit.cgi;
-		fastcgi_param PATH_INFO $uri;
-		fastcgi_param QUERY_STRING $args;
-	}
-}
diff --git a/alpine/roles/common/tasks/chrony.yml b/alpine/roles/common/tasks/chrony.yml
@@ -1,14 +0,0 @@
----
-
-- name: "Install package: chrony" 
-  apk:
-    name: chrony 
-    state: present
-    update_cache: yes
-
-- name: "Enable and restart service: chrony"
-  service:
-    name: chronyd
-    enabled: yes
-    state: restarted
-  ignore_errors: yes-
\ No newline at end of file
diff --git a/alpine/roles/common/tasks/firewall-awall.yml b/alpine/roles/common/tasks/firewall-awall.yml
@@ -1,43 +0,0 @@
----
-- name: "Install Package: awall"
-  apk:
-    name: awall lua-lyaml ip6tables
-
-- name: "Load kernel module: iptables"
-  raw: "modprobe ip_tables"
-
-- name: Copy base custom-services configguration to destination host
-  copy:
-    src: config-files/awall/custom-services.json
-    dest: /etc/awall/private/custom-services.json
-    validate: jq '.' %s
-
-- name: Copy base awall(firewall) configguration to destination host
-  template:
-    src: awall-baseconfig.yaml.j2
-    dest: /etc/awall/private/base.yaml
-#    validate: jq '.' %s
-
-- name: Copy awall(firewall) configguration for ssh to destination host
-  copy:
-    src: config-files/awall/ssh.json
-    dest: /etc/awall/optional/ssh.json
-    validate: jq '.' %s
-
-- name: "Activate firewall-rule for: ssh"
-  awall:
-   name: ssh
-   state: enabled
-   activate: yes
-
-- name: "Enable and start service: iptables"
-  service:
-   name: iptables
-   enabled: yes
-   state: started
-
-- name: "Enable and start service: ip6tables"
-  service:
-   name: ip6tables
-   enabled: yes
-   state: started
diff --git a/alpine/roles/common/tasks/firewall-ferm.yml b/alpine/roles/common/tasks/firewall-ferm.yml
@@ -1,23 +0,0 @@
----
-
-- name: "Install Package: ferm"
-  apk:
-    name: ferm
-
-- name: copy service file to destination
-  copy:
-    src: config-files/ferm/ferm.initd
-    dest: /etc/init.d/ferm
-    mode: 0755
-
-- name: copy ferm config to destination
-  copy:
-    src: "config-files/ferm/ferm-{{ network.hostname }}.conf"
-    dest: /etc/ferm.conf
-    mode: 0644
-
-- name: "Enable and start service: ferm"
-  service:
-   name: ferm
-   enabled: yes
-   state: started
diff --git a/alpine/roles/common/tasks/main.yml b/alpine/roles/common/tasks/main.yml
@@ -1,21 +0,0 @@
----
-- include: packages.yml
-
-- include: network.yml
-
-- include: chrony.yml
-
-- include: sudo.yml
-
-- include: sshd.yml
-
-- include: users.yml
-
-- include: firewall-awall.yml
-  when: network.useAwall is true 
-
-- include: firewall-ferm.yml
-  when: network.useFerm is true 
-
-- include: node-exporter.yml
-  when: alpineVersion == "edge"
diff --git a/alpine/roles/common/tasks/network.yml b/alpine/roles/common/tasks/network.yml
@@ -1,80 +0,0 @@
----
-
-- name: "create file: /etc/network/interfaces"
-  template:
-    src: interfaces.conf.j2
-    dest: /etc/network/interfaces
-    mode: 0755
-
-- name: "create file: /etc/hosts"
-  template:
-    src: hosts.conf.j2
-    dest: /etc/hosts
-    mode: 0755
-
-- name: "create file: /etc/resolv.conf"
-  template:
-    src: resolv.conf.j2
-    dest: /etc/resolv.conf
-    mode: 0755
-
-- name: "create file: /etc/hostname"
-  copy:
-    content: "{{network.hostname}}"
-    dest: /etc/hostname
-  register: hostname
-
-- name: Change hostname of running system
-  hostname:
-    name: "{{network.hostname}}"
-    use: alpine
-  when: hostname.changed
-
-- name: enable NAT support
-  ansible.posix.sysctl:
-    name: net.ipv4.ip_forward
-    value: '1'
-    sysctl_set: yes
-    state: present
-    reload: yes
-  when: network.natSupport is true
-
-- name: disable NAT support
-  ansible.posix.sysctl:
-    name: net.ipv4.ip_forward
-    value: '0'
-    sysctl_set: yes
-    state: absent
-    reload: yes
-  when: network.natSupport is false
-
-- name: "Install package: vlan"
-  apk:
-    name: vlan
-    update_cache: yes
-  when: network.vlanSupport is true
-
-- name: "Remove package: vlan"
-  apk:
-    name: vlan
-    state: absent
-    update_cache: yes
-  when: network.vlanSupport is false  
-
-- name: "Install package: bridge"
-  apk:
-    name: bridge
-    update_cache: yes
-  when: network.bridgeSupport is true
-
-- name: "Remove package: bridge"
-  apk:
-    name: bridge
-    state: absent
-    update_cache: yes
-  when: network.bridgeSupport is false  
-
-- name: "Restart service: networking"
-  service:
-    name: networking
-    state: restarted
diff --git a/alpine/roles/common/tasks/node-exporter.yml b/alpine/roles/common/tasks/node-exporter.yml
@@ -1,10 +0,0 @@
----
-- name: "Install package: node-exporter"
-  apk:
-    name: prometheus-node-exporter
-
-- name: "Enable and start service: node-exporter"
-  service:
-   name: node-exporter
-   enabled: yes
-   state: started
diff --git a/alpine/roles/common/tasks/packages.yml b/alpine/roles/common/tasks/packages.yml
@@ -1,32 +0,0 @@
----
-
-- name: get signature from personal repo
-  get_url:
-    url: http://home.f2k1.de:8080/leah-5f817de5.rsa.pub
-    dest: /etc/apk/keys/leah-5f817de5.rsa.pub
-
-- name: "Update file: /etc/apk/repositories"
-  template:
-    src: repositories.j2
-    dest: /etc/apk/repositories
-
-- name: update system
-  raw: "apk update && apk upgrade"
-
-- name: Install common packages
-  apk:
-    name:
-      - nano
-      - sudo
-      - htop
-      - tar
-      - unzip
-      - curl 
-      - wget
-      - tmux
-      - git
-      - patch
-      - jq
-      - restic
-    update_cache: yes
-    
diff --git a/alpine/roles/common/tasks/sshd.yml b/alpine/roles/common/tasks/sshd.yml
@@ -1,18 +0,0 @@
----
-
-- name: "Install package: openssh" 
-  apk:
-   name: openssh 
-   state: present
-   update_cache: yes
-
-- name: "Patch file: /etc/ssh/sshd_config"
-  patch:
-    src: config-files/ssh/sshd_config.patch
-    dest: /etc/ssh/sshd_config
-
-- name: "Enable and restart service: sshd"
-  service:
-   name: sshd
-   enabled: yes
-   state: restarted
diff --git a/alpine/roles/common/tasks/sudo.yml b/alpine/roles/common/tasks/sudo.yml
@@ -1,12 +0,0 @@
----
-
-- name: "Install package: sudo" 
-  apk:
-   name: sudo 
-   state: present
-   update_cache: yes
-
-- name: "Patch file: /etc/sudoers"
-  patch:
-    src: config-files/sudo/sudoers.patch
-    dest: /etc/sudoers
diff --git a/alpine/roles/common/tasks/users.yml b/alpine/roles/common/tasks/users.yml
@@ -1,34 +0,0 @@
----
-
-- name: "Add groups" 
-  group:
-    name: "{{item.name}}"
-    state: present
-  loop: "{{ users }}"
-
-- name: "Add users" 
-  user:
-    append: yes
-    name: "{{item.name}}"
-    group: "{{item.name}}"
-    groups: "{{item.groups}}"
-    password: "{{item.password}}"
-  loop: "{{ users }}"
-
-- name: "Create ~/.ssh directory for users"
-  file:
-    state: directory
-    dest: "/home/{{item.name}}/.ssh/"
-    mode: 0755
-    owner: "{{item.name}}"
-    group: "{{item.name}}"
-  loop: "{{ users }}"
-
-- name: "Place ssh-key for users"
-  copy:
-    content: "{{item.sshKey}}"
-    dest: "/home/{{item.name}}/.ssh/authorized_keys"
-    mode: 0644    
-    owner: "{{item.name}}"
-    group: "{{item.name}}"
-  loop: "{{ users }}"
diff --git a/alpine/roles/common/templates/awall-baseconfig.yaml.j2 b/alpine/roles/common/templates/awall-baseconfig.yaml.j2
@@ -1,17 +0,0 @@
-description: Base zones and policies for {{network.hostname}}
-import:
-  - custom-services
-
-zone:
-  {{ network.awall.zones | to_yaml | trim | indent(2) }}
-
-policy:
-  {{ network.awall.policys | to_yaml| trim | indent(2) }}
-
-filter:
-  {{ network.awall.filters | to_yaml | trim | indent(2) }}
-
-{% if network.awall.snat is defined %}
-snat:
-  - out: {{ network.awall.snat }}
-{% endif %}-
\ No newline at end of file
diff --git a/alpine/roles/common/templates/hosts.conf.j2 b/alpine/roles/common/templates/hosts.conf.j2
@@ -1,2 +0,0 @@
-127.0.0.1	localhost localhost.localdomain {{ network.hostname }} {{ network.hostname }}.{{ network.domain }}
-::1			localhost localhost.localdomain {{ network.hostname }} {{ network.hostname }}.{{ network.domain }}
diff --git a/alpine/roles/common/templates/interfaces.conf.j2 b/alpine/roles/common/templates/interfaces.conf.j2
@@ -1,58 +0,0 @@
-{% for interface in network.interfaces %}
-auto {{ interface.name }}
-{% if interface.loopback is defined %}
-iface {{ interface.name }} inet loopback
-{% elif interface.manual is defined %}
-iface {{ interface.name }} inet manual
-{% else %}
-{% if interface.ipv4.dhcp is defined %}
-iface {{ interface.name }} inet dhcp
-{% else %}
-iface {{ interface.name }} inet static
-{% if network.hostname is defined %}
-	hostname {{ network.hostname }}
-{% endif %}
-{% if interface.bridge_ports is defined %}
-	bridge-ports {{interface.bridge_ports}}
-{% endif %}
-{% if interface.bridge_stp is defined %}
-{% if interface.bridge_stp is true %}
-	bridge-stp 1
-{% else %}
-	bridge-stp 0
-{% endif %}
-{% endif %}
-{% if interface.ipv4.address is defined %}
-	address {{ interface.ipv4.address }}
-{% endif %}
-{% if interface.ipv4.netmask is defined %}
-	netmask {{ interface.ipv4.netmask }}
-{% endif %}
-{% if interface.ipv4.gateway is defined %}
-	gateway {{ interface.ipv4.gateway }}
-{% endif %}
-
-{% if interface.ipv6 is defined %}
-{% if interface.ipv6.stateless is defined %}
-iface {{ interface.name }} inet6 manual
-	pre-up echo 1 > /proc/sys/net/ipv6/conf/eth0/accept_ra
-{% else %}
-iface {{ interface.name }} inet6 static
-{% if network.hostname is defined %}
-	hostname {{ network.hostname }}
-{% endif %}
-{% if interface.ipv6.address is defined %}
-	address {{ interface.ipv6.address }}
-{% endif %}
-{% if interface.ipv6.netmask is defined %}
-	netmask {{ interface.ipv6.netmask }}
-{% endif %}
-{% if interface.ipv6.gateway is defined %}
-	gateway {{ interface.ipv6.gateway }}
-{% endif %}
-{% endif %}
-{% endif %}
-{% endif %}
-{% endif %}
-
-{% endfor %}-
\ No newline at end of file
diff --git a/alpine/roles/common/templates/repositories.j2 b/alpine/roles/common/templates/repositories.j2
@@ -1,6 +0,0 @@
-http://home.f2k1.de:8080/alpine-pkgs
-https://dl-cdn.alpinelinux.org/alpine/{{alpineVersion}}/main
-https://dl-cdn.alpinelinux.org/alpine/{{alpineVersion}}/community
-{% if alpineVersion == "edge" %}
-https://dl-cdn.alpinelinux.org/alpine/{{alpineVersion}}/testing
-{% endif %}-
\ No newline at end of file
diff --git a/alpine/roles/common/templates/resolv.conf.j2 b/alpine/roles/common/templates/resolv.conf.j2
@@ -1,4 +0,0 @@
-search {{ network.domain }}
-{% for nameserver in network.nameservers %}
-nameserver {{ nameserver }}
-{% endfor %}
diff --git a/alpine/roles/dnsmasq/tasks/main.yml b/alpine/roles/dnsmasq/tasks/main.yml
@@ -1,18 +0,0 @@
----
-
-- name: "Install package: dnsmasq" 
-  apk:
-   name: dnsmasq 
-   state: present
-   update_cache: yes
-
-- name: "create file: /etc/dnsmasq.conf"
-  template:
-    src: dnsmasq.conf.j2
-    dest: /etc/dnsmasq.d/ansible.conf
-
-- name: "Enable and restart service: dnsmasq"
-  service:
-   name: dnsmasq
-   enabled: yes
-   state: restarted
diff --git a/alpine/roles/dnsmasq/templates/dnsmasq.conf.j2 b/alpine/roles/dnsmasq/templates/dnsmasq.conf.j2
@@ -1,34 +0,0 @@
-except-interface=eth0
-no-resolv
-
-# filter what we send upstream
-domain-needed
-bogus-priv
-
-server=1.1.1.1@eth0
-server=1.0.0.1@eth0
-server=8.8.8.8@eth0
-server=8.8.4.4@eth0
-
-# allow /etc/hosts and dhcp lookups for local domains
-local=/home.ctu.cx/
-domain=home.ctu.cx
-
-expand-hosts
-read-ethers
-
-enable-ra
-quiet-ra
-
-dhcp-range=10.0.0.40, 10.0.0.254,48h
-dhcp-range=::10, ::400, constructor:brlan ra-names, slaac, 48h
-dhcp-option=option6:information-refresh-time,6h
-
-dhcp-authoritative
-dhcp-rapid-commit
-
-# IPv4 DNS server
-dhcp-option=option:dns-server,10.0.0.1
-
-# IPv4 gateway
-dhcp-option=option:router,10.0.0.1
diff --git a/alpine/roles/frp/tasks/frpc.yml b/alpine/roles/frp/tasks/frpc.yml
@@ -1,20 +0,0 @@
----
-
-- copy:
-   content: '# Configuration for /etc/init.d/frpc\nfrps_opts="-c /etc/frpc.ini"'
-   dest: /etc/conf.d/frpc
-   mode: 0644
-
-- name: create config for frpc
-  template:
-    src: frpc.conf.j2
-    dest: /etc/frpc.ini
-    owner: frpc
-    group: frpc
-    mode: 0644
-
-- name: "Restart and enable service: frpc"
-  service:
-    name: frpc
-    state: restarted
-    enabled: yes
diff --git a/alpine/roles/frp/tasks/frps.yml b/alpine/roles/frp/tasks/frps.yml
@@ -1,37 +0,0 @@
----
-
-- name: Copy http(s) firewall-rule to destination host
-  copy:
-    src: config-files/awall/frps.json
-    dest: /etc/awall/optional/frps.json
-    validate: jq '.' %s
-
-- name: "Enable firewall-rule for: frps"
-  awall:
-   name: frps
-   state: enabled
-   activate: yes
-
-- template:
-   src: frps.confd.j2
-   dest: /etc/conf.d/frps
-   mode: 0644
-
-- name: copy nginx-vhost for frps to destination host 
-  template: 
-    src: frps-vhost.conf.j2
-    dest: /etc/nginx/conf.d/frps.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Restart and enable service: frps"
-  service:
-    name: frps
-    state: restarted
-    enabled: yes
-
-- name: "Restart and enable service: nginx"
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/frp/tasks/main.yml b/alpine/roles/frp/tasks/main.yml
@@ -1,13 +0,0 @@
----
-
-- name: Install frp
-  apk:
-   name: frp
-   state: present
-   update_cache: yes
-
-- include: frps.yml
-  when: frps is defined
-
-- include: frpc.yml
-  when: frpc is defined-
\ No newline at end of file
diff --git a/alpine/roles/frp/templates/frpc.conf.j2 b/alpine/roles/frp/templates/frpc.conf.j2
@@ -1,33 +0,0 @@
-[common]
-server_addr = {{ frpc.serverAddress }}
-server_port = {{ frpc.serverPort }}
-token       = {{ frpc.token }}
-{% if frpc.dashboard is true %}
-dashboard_port = {{ frpc.dashboard_port }}
-dashboard_user = {{ frpc.dashboard_user }}
-dashboard_pwd  = {{ frpc.dashboard_pwd }}
-{% endif %}
-
-{% for tunnel in frpc.tunnels %}
-[{{ tunnel.name }}]
-type = {{ tunnel.type }}
-{% if tunnel.local_ip is defined %}
-local_ip = {{ tunnel.local_ip }}
-{% endif %}
-{% if tunnel.local_port is defined %}
-local_port = {{ tunnel.local_port }}
-{% endif %}
-{% if tunnel.remote_port is defined %}
-remote_port = {{ tunnel.remote_port }}
-{% endif %}
-{% if tunnel.custom_domains is defined %}
-custom_domains = {{ tunnel.custom_domains }}
-{% endif %}
-{% if tunnel.subdomain is defined %}
-subdomain = {{ tunnel.subdomain }}
-{% endif %}
-{% if tunnel.locations is defined %}
-locations = {{ tunnel.locations }}
-{% endif %}
-
-{% endfor %}-
\ No newline at end of file
diff --git a/alpine/roles/frp/templates/frps-vhost.conf.j2 b/alpine/roles/frp/templates/frps-vhost.conf.j2
@@ -1,32 +0,0 @@
-{% for vhost in frps.vhosts %}
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{frps.ssl_cert}}";
-	ssl_certificate_key "{{frps.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{vhost}}.{{frps.vhost_domain}};
-
-	location / {
-		proxy_pass http://127.0.0.1:{{frps.vhost_port}}/;
-
-        proxy_http_version      1.1;
-        proxy_set_header        Upgrade $http_upgrade;
-        proxy_set_header        Connection "upgrade";
-
-        proxy_redirect          off;
-        proxy_connect_timeout   90;
-        proxy_send_timeout      90;
-        proxy_read_timeout      90;
-        proxy_set_header        Host $host;
-        proxy_set_header        X-Real-IP $remote_addr;
-        proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header        X-Forwarded-Proto $scheme;
-        proxy_set_header        X-Forwarded-Host $host;
-        proxy_set_header        X-Forwarded-Server $host;
-	}
-}
-
-{% endfor %}
diff --git a/alpine/roles/frp/templates/frps.confd.j2 b/alpine/roles/frp/templates/frps.confd.j2
@@ -1,2 +0,0 @@
-# Configuration for /etc/init.d/frps
-frps_opts="--token={{frps.token}} --bind_port={{frps.port}} --subdomain_host={{frps.vhost_domain}} --vhost_http_port={{frps.vhost_port}}"
diff --git a/alpine/roles/gitolite/tasks/main.yml b/alpine/roles/gitolite/tasks/main.yml
@@ -1,59 +0,0 @@
----
-- name: "Install package: gitolite"
-  apk:
-   name: gitolite, git
-   state: present
-   update_cache: yes
-
-- fail: msg="gitolite_initialKey is not defined!"
-  when: gitolite_initialKey is not defined
-
-- name: copy initial ssh-key to destination host
-  when: gitolite_initialKey is defined
-  copy:
-    content: "{{gitolite_initialKey}}"
-    dest: /var/lib/git/first-user-key.pub
-    owner: git
-    group: git
-
-- name: Initial setup of gitolite
-  become: yes
-  become_user: git
-  command:
-    cmd: gitolite setup -pk /var/lib/git/first-user-key.pub
-    creates: /var/lib/git/.gitolite
-
-- name: Delete first-user-key.pub
-  file:
-    path: /var/lib/git/first-user-key.pub
-    state: absent
-
-- name: Unlock the git user
-  ignore_errors: yes
-  command:
-    cmd: passwd -u git
-
-- name: fix gitolite.rc to set correct permissons
-  patch:
-    src: config-files/gitolite/gitolite.rc.patch
-    dest: /var/lib/git/.gitolite.rc
-
-- name: set permissions for git dir
-  file:
-    path: /var/lib/git
-    state: directory  
-    mode: 0755
-    owner: git
-    group: git
-
-- name: Copy ssh firewall-rule to destination host
-  copy:
-    src: config-files/awall/ssh.json
-    dest: /etc/awall/optional/ssh.json
-    validate: jq '.' %s
-
-- name: "Activate firewall-rule for: ssh"
-  awall:
-   name: ssh
-   state: enabled
-   activate: yes
diff --git a/alpine/roles/grafana/tasks/main.yml b/alpine/roles/grafana/tasks/main.yml
@@ -1,40 +0,0 @@
----
-
-- name: "Install package: grafana"
-  apk:
-   name: grafana
-   state: present
-   update_cache: yes
-
-- name: copy grafana configs to destination host
-  copy: 
-    src: config-files/grafana/grafana.ini
-    dest: /etc/grafana.ini
-    mode: 0755
-
-- name: copy grafana configs to destination host
-  copy: 
-    src: config-files/grafana/provisioning/
-    dest: /var/lib/grafana/provisioning/
-    mode: 0755
-    owner: grafana
-    group: grafana
-
-- name: copy nginx-vhost for grafana to destination host
-  template:
-    src: grafana-vhost.conf.j2
-    dest: /etc/nginx/conf.d/grafana.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Restart and enable service: grafana"
-  service:
-   name: grafana
-   enabled: yes
-   state: restarted
-
-- name: "Restart service: nginx"
-  service:
-   name: nginx
-   state: restarted
diff --git a/alpine/roles/grafana/templates/grafana-vhost.conf.j2 b/alpine/roles/grafana/templates/grafana-vhost.conf.j2
@@ -1,15 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{grafana.ssl_cert}}";
-	ssl_certificate_key "{{grafana.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{grafana.domain}};
-
-
-	location / {
-		proxy_pass http://localhost:3000/;
-	}
-}
diff --git a/alpine/roles/hostapd/tasks/main.yml b/alpine/roles/hostapd/tasks/main.yml
@@ -1,18 +0,0 @@
----
-
-- name: "Install package: hostapd" 
-  apk:
-   name: hostapd 
-   state: present
-   update_cache: yes
-
-- name: "create file: /etc/hostapd/hostapd.conf"
-  template:
-    src: hostapd.conf.j2
-    dest: /etc/hostapd/hostapd.conf
-
-- name: "Enable and restart service: hostapd"
-  service:
-   name: hostapd
-   enabled: yes
-   state: restarted
diff --git a/alpine/roles/hostapd/templates/hostapd.conf.j2 b/alpine/roles/hostapd/templates/hostapd.conf.j2
@@ -1,23 +0,0 @@
-interface={{hostapd.interface}}
-bridge={{hostapd.bridge}}
-
-ssid={{hostapd.ssid}}
-driver=nl80211
-country_code=DE
-
-hw_mode=g
-channel={{hostapd.channel}}
-
-wpa=2
-auth_algs=1
-
-rsn_pairwise=CCMP
-wpa_key_mgmt=WPA-PSK
-wpa_passphrase={{hostapd.passphrase}}
-
-logger_stdout=-1
-logger_stdout_level=2
-
-ieee80211n=1
-wmm_enabled=1
-ht_capab=[HT40+]
diff --git a/alpine/roles/maddy/tasks/main.yml b/alpine/roles/maddy/tasks/main.yml
@@ -1,52 +0,0 @@
----
-
-- name: "Install package: maddy"
-  apk:
-   name: maddy
-   state: present
-   update_cache: yes
-
-- name: adding user maddy to group acme-redirect
-  user:
-    name: maddy
-    groups: acme-redirect
-    append: yes
-
-- name: copy maddy-config to destination host 
-  template: 
-    src: maddy.conf.j2
-    dest: /etc/maddy/maddy.conf
-    mode: 0644
-    owner: maddy
-    group: maddy
-
-- name: "Create file: /var/log/maddy.log"
-  file:
-    path:  /var/log/maddy.log
-    state: touch
-    owner: maddy
-    group: maddy
-    mode:  0644
-
-- name: "Patch file: /etc/init.d/maddy"
-  patch:
-    src: config-files/maddy/maddy-service.patch
-    dest: /etc/init.d/maddy
-
-- name: Copy mail firewall-rule to destination host
-  copy:
-    src: config-files/awall/mail.json
-    dest: /etc/awall/optional/mail.json
-    validate: jq '.' %s
-
-- name: "Activate firewall-rule for: mail"
-  awall:
-   name: mail
-   state: enabled
-   activate: yes
-
-- name: "Restart and enable service: maddy"
-  service:
-   name: maddy
-   enabled: yes
-   state: restarted
diff --git a/alpine/roles/maddy/templates/maddy.conf.j2 b/alpine/roles/maddy/templates/maddy.conf.j2
@@ -1,184 +0,0 @@
-## maddy 0.4 - default configuration file
-
-log syslog /var/log/maddy.log
-
-# ----------------------------------------------------------------------------
-# Base variables
- $(hostname) = {{maddy.hostname}} 
-$(primary_domain) = ctu.cx
-$(local_domains) = $(hostname) $(primary_domain) antifa.jetzt thein.ovh ctucx.de
-
-tls file {{maddy.ssl_cert}} {{maddy.ssl_privkey}}
-
-# ----------------------------------------------------------------------------
-# Local storage & authentication
-
-# pass_table provides local hashed passwords storage for authentication of
-# users. It can be configured to use any "table" module, in default
-# configuration a table in SQLite DB is used.
-# Table can be replaced to use e.g. a file for passwords. Or pass_table module
-# can be replaced altogether to use some external source of credentials (e.g.
-# PAM, /etc/shadow file).
-#
-# If table module supports it (sql_table does) - credentials can be managed
-# using 'maddyctl creds' command.
-
-auth.pass_table local_authdb {
-    table sql_table {
-        driver sqlite3
-        dsn credentials.db
-        table_name passwords
-    }
-}
-
-# imapsql module stores all indexes and metadata necessary for IMAP using a
-# relational database. It is used by IMAP endpoint for mailbox access and
-# also by SMTP & Submission endpoints for delivery of local messages.
-#
-# IMAP accounts, mailboxes and all message metadata can be inspected using
-# imap-* subcommands of maddyctl utility.
-
-storage.imapsql local_mailboxes {
-    driver sqlite3
-    dsn imapsql.db
-}
-
-# ----------------------------------------------------------------------------
-# SMTP endpoints + message routing
-
-hostname $(hostname)
-
-msgpipeline local_routing {
-    dmarc yes
-    check {
-        require_mx_record
-        dkim
-        spf
-    }
-
-    # Insert handling for special-purpose local domains here.
-    # e.g.
-    # destination lists.example.org {
-    #     deliver_to lmtp tcp://127.0.0.1:8024
-    # }
-
-    destination postmaster $(local_domains) {
-        modify {
-            replace_rcpt static {
-               entry postmaster           postmaster@$(primary_domain)
-               entry leon@thein.ovh       leah@ctu.cx
-               entry leah@thein.ovh       leah@ctu.cx
-               entry leah@antifa.jetzt    leah@ctu.cx
-            }
-
-            # Implement plus-address notation.
-            replace_rcpt regexp "(.+)\+(.+)@(.+)" "$1@$3"
-
-            replace_rcpt regexp "(.+)@ctucx.de" "leah@ctu.cx"
-            replace_rcpt regexp "(.+)@ctu.cx"   "leah@ctu.cx"
-        }
-
-        deliver_to &local_mailboxes
-    }
-
-    default_destination {
-        reject 550 5.1.1 "User doesn't exist"
-    }
-}
-
-smtp tcp://0.0.0.0:25 {
-    limits {
-        # Up to 20 msgs/sec across max. 10 SMTP connections.
-        all rate 20 1s
-        all concurrency 10
-    }
-
-    source $(local_domains) {
-        reject 501 5.1.8 "Use Submission for outgoing SMTP"
-    }
-
-    default_source {
-        destination postmaster $(local_domains) {
-            deliver_to &local_routing
-        }
-
-        default_destination {
-            reject 550 5.1.1 "User doesn't exist"
-        }
-    }
-}
-
-submission tls://0.0.0.0:465 tcp://0.0.0.0:587 {
-    limits {
-        # Up to 50 msgs/sec across any amount of SMTP connections.
-        all rate 50 1s
-    }
-
-    auth &local_authdb
-
-    source $(local_domains) {
-        destination postmaster $(local_domains) {
-            deliver_to &local_routing
-        }
-
-        default_destination {
-            modify {
-                dkim $(primary_domain) $(local_domains) default {
-                	newkey_algo ed25519
-                }
-            }
-            deliver_to &remote_queue
-        }
-    }
-
-    default_source {
-        reject 501 5.1.8 "Non-local sender domain"
-    }
-}
-
-target.remote outbound_delivery {
-    limits {
-        # Up to 20 msgs/sec across max. 10 SMTP connections
-        # for each recipient domain.
-        destination rate 20 1s
-        destination concurrency 10
-    }
-
-    mx_auth {
-        dane
-
-        mtasts {
-            cache fs
-            fs_dir mtasts_cache/
-        }
-
-        local_policy {
-            min_tls_level encrypted
-            min_mx_level none
-        }
-    }
-}
-
-target.queue remote_queue {
-    target &outbound_delivery
-
-    autogenerated_msg_domain $(primary_domain)
-
-    bounce {
-        destination postmaster $(local_domains) {
-            deliver_to &local_routing
-        }
-
-        default_destination {
-            reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
-        }
-    }
-}
-
-# ----------------------------------------------------------------------------
-# IMAP endpoints
-
-imap tls://0.0.0.0:993 tcp://0.0.0.0:143 {
-    auth &local_authdb
-    storage &local_mailboxes
-}
diff --git a/alpine/roles/nginx/tasks/main.yml b/alpine/roles/nginx/tasks/main.yml
@@ -1,76 +0,0 @@
----
-
-- name: "Install package: nginx" 
-  apk:
-   name: nginx
-   state: present
-   update_cache: yes
-
-- name: adding user nginx to group acme-redirect
-  user:
-    name: nginx
-    groups: acme-redirect
-    append: yes
-
-- name: copy nginx config to destination host
-  copy:
-    src: config-files/nginx/nginx.conf
-    dest: /etc/nginx/nginx.conf
-    owner: nginx
-    group: nginx
-
-- name: copy ssl config to destination host
-  copy:
-    src: config-files/nginx/ssl.conf
-    dest: /etc/nginx/ssl.conf
-    owner: nginx
-    group: nginx
-
-- name: copy default vhost to destination host 
-  template:
-    src: vhost.conf.j2
-    dest: /etc/nginx/conf.d/default.conf
-    owner: nginx
-    group: nginx
-
-- name: Copy http(s) firewall-rule to destination host
-  copy:
-    src: config-files/awall/web.json
-    dest: /etc/awall/optional/web.json
-    validate: jq '.' %s
-
-- name: "activate firewall-rule for: http, https"
-  awall:
-   name: web
-   state: enabled
-   activate: yes
-
-- name: Download dh-params from mozilla
-  get_url:
-    url: https://ssl-config.mozilla.org/ffdhe2048.txt
-    dest: /etc/nginx/dhparam
-    owner: nginx
-    group: nginx    
-
-- name: "Create directory: /etc/nginx/passwd"
-  file:
-    path:  /etc/nginx/passwd
-    state: directory
-    owner: nginx
-    group: nginx
-    mode:  0700
-
-- name: copy config for reverse-proxying
-  copy:
-    src: config-files/nginx/proxy.conf
-    dest: /etc/nginx/proxy.conf
-    owner: nginx
-    group: nginx
-    mode: 0755
-
-- name: "Enable and start service: nginx"
-  service:
-   name: nginx
-   enabled: yes
-   state: restarted
-
diff --git a/alpine/roles/nginx/templates/vhost.conf.j2 b/alpine/roles/nginx/templates/vhost.conf.j2
@@ -1,22 +0,0 @@
-server {
-	listen 443 ssl default_server;
-	listen [::]:443 ssl default_server;
-
-	ssl_certificate "{{nginx.ssl_cert}}";
-	ssl_certificate_key "{{nginx.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-
-	# Everything is a 404
-	location / {
-		return 404;
-	}
-
-	location /node-exporter {
-		proxy_pass http://127.0.0.1:9100/metrics;
-	}
-
-	# You may need this to prevent return 404 recursion.
-	location = /404.html {
-		internal;
-	}
-}
diff --git a/alpine/roles/oeffi-web/tasks/main.yml b/alpine/roles/oeffi-web/tasks/main.yml
@@ -1,64 +0,0 @@
----
-
-- name: "Install package: oeffi-web"
-  apk:
-   name: oeffi-web
-   state: present
-   update_cache: yes
-
-- name: copy service files to destination host 
-  template: 
-    src: oeffi-web.initd.j2
-    dest: "/etc/init.d/oeffi-web{{item}}"
-    mode: 0755
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: create logfiles
-  file:
-    path: "/var/log/oeffi-web"
-    mode: 0755
-    state: directory
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: create logfiles
-  file:
-    path: "/var/log/oeffi-web/{{item}}.log"
-    mode: 0777
-    state: touch
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: copy nginx-vhost for oeffi-web to destination host 
-  template: 
-    src: oeffi-web-vhost.conf.j2
-    dest: /etc/nginx/conf.d/oeffi-web.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Enable and restart services: oeffi-web"
-  service:
-   name: "oeffi-web{{item}}"
-   enabled: yes
-   state: restarted
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: "Restart service: nginx"
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/oeffi-web/templates/oeffi-web-vhost.conf.j2 b/alpine/roles/oeffi-web/templates/oeffi-web-vhost.conf.j2
@@ -1,26 +0,0 @@
-upstream oeffiweb {
-	least_conn;
-	server 127.0.0.1:5001;
-	server 127.0.0.1:5002;
-	server 127.0.0.1:5003;
-	server 127.0.0.1:5004;
-}
-
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{oeffi_web.ssl_cert}}";
-	ssl_certificate_key "{{oeffi_web.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{oeffi_web.domain}};
-
-	location / {
-		proxy_pass http://oeffiweb;
-	}
-
-	location /assets {
-		root /usr/share/oeffi-web;
-	}
-}
diff --git a/alpine/roles/oeffi-web/templates/oeffi-web.initd.j2 b/alpine/roles/oeffi-web/templates/oeffi-web.initd.j2
@@ -1,28 +0,0 @@
-#!/sbin/openrc-run
-supervisor=supervise-daemon
-
-name="oeffi-web"
-description="fast and simple tripplanner for the web"
-
-PORT=500{{item}}
-PID_FILE=/run/oeffi-web/{{item}}
-CACHE_PATH=/var/lib/oeffisearch
-
-export PORT
-export CACHE_PATH
-
-command="/usr/bin/oeffi-web"
-command_user=oeffisearch:oeffisearch
-command_background=true
-pidfile=/run/oeffi-web/{{item}}
-directory="/usr/share/oeffi-web"
-output_log="/var/log/oeffi-web/{{item}}.log"
-
-depend() {
-	need net localmount
-	after firewall
-}
-
-start_pre() {
-	checkpath -d -o oeffisearch:oeffisearch /run/oeffi-web
-}-
\ No newline at end of file
diff --git a/alpine/roles/oeffisearch/tasks/main.yml b/alpine/roles/oeffisearch/tasks/main.yml
@@ -1,64 +0,0 @@
----
-
-- name: "Install package: oeffisearch"
-  apk:
-   name: oeffisearch
-   state: present
-   update_cache: yes
-
-- name: copy service files to destination host 
-  template: 
-    src: oeffisearch.initd.j2
-    dest: "/etc/init.d/oeffisearch{{item}}"
-    mode: 0755
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: create logfiles
-  file:
-    path: "/var/log/oeffisearch"
-    mode: 0755
-    state: directory
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: create logfiles
-  file:
-    path: "/var/log/oeffisearch/{{item}}.log"
-    mode: 0777
-    state: touch
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: copy nginx-vhost for oeffisearch to destination host 
-  template: 
-    src: oeffisearch-vhost.conf.j2
-    dest: /etc/nginx/conf.d/oeffisearch.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Enable and restart service: oeffisearch"
-  service:
-   name: "oeffisearch{{item}}"
-   enabled: yes
-   state: restarted
-  loop:
-    - 1
-    - 2
-    - 3
-    - 4
-
-- name: "Restart service: nginx"
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/oeffisearch/templates/oeffisearch-vhost.conf.j2 b/alpine/roles/oeffisearch/templates/oeffisearch-vhost.conf.j2
@@ -1,27 +0,0 @@
-upstream oeffisearch {
-	least_conn;
-	server 127.0.0.1:8081;
-	server 127.0.0.1:8082;
-	server 127.0.0.1:8083;
-	server 127.0.0.1:8084;
-}
-
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{oeffisearch.ssl_cert}}";
-	ssl_certificate_key "{{oeffisearch.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{oeffisearch.domain}};
-
-	location / {
-		try_files $uri $uri/ @api;
-		root /usr/share/oeffisearch;
-	}
-
-	location @api {
-		proxy_pass http://oeffisearch;
-	}
-}
diff --git a/alpine/roles/oeffisearch/templates/oeffisearch.initd.j2 b/alpine/roles/oeffisearch/templates/oeffisearch.initd.j2
@@ -1,28 +0,0 @@
-#!/sbin/openrc-run
-supervisor=supervise-daemon
-
-name="oeffisearch"
-description="fast and simple tripplanner for the web"
-
-PORT=808{{item}}
-PID_FILE=/run/oeffisearch/{{item}}
-CACHE_PATH=/var/lib/oeffisearch
-
-export PORT
-export CACHE_PATH
-
-command="/usr/bin/oeffisearch"
-command_user="oeffisearch:oeffisearch"
-command_background=true
-pidfile=/run/oeffisearch/{{item}}
-directory="/var/lib/oeffisearch"
-output_log="/var/log/oeffisearch/{{item}}.log"
-
-depend() {
-	need net localmount
-	after firewall
-}
-
-start_pre() {
-	checkpath -d -o oeffisearch:oeffisearch /run/oeffisearch
-}-
\ No newline at end of file
diff --git a/alpine/roles/pleroma/tasks/main.yml b/alpine/roles/pleroma/tasks/main.yml
@@ -1,54 +0,0 @@
----
-
-- name: "Install package: postgresql"
-  apk:
-   name: postgresql postgresql-contrib
-   state: present
-   update_cache: yes  
-
-- name: "Install package: pleroma"
-  apk:
-   name: pleroma
-   state: present
-   update_cache: yes
-
-- name: "Copy config for: pleroma" 
-  copy: 
-    src: config-files/pleroma/config.exs
-    dest: /etc/pleroma/config.exs
-    mode: 0755
-    owner: pleroma
-    group: pleroma
-
-- name: create secrets.exs from passwordstore
-  copy:
-    content: "{{ lookup('community.general.passwordstore', 'server/{{network.hostname}}/pleroma.secrets returnall=true')}}"
-    dest:    /var/lib/pleroma/secret.exs
-    owner:   pleroma
-    group:   pleroma
-    mode:    0700
-
-- name: copy nginx-vhost for pleroma to destination host
-  template:
-    src: pleroma-vhost.conf.j2
-    dest: /etc/nginx/conf.d/pleroma.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Enable and start service: postgresql"
-  service:
-   name: postgresql
-   enabled: yes
-   state: started
-
-- name: "Enable and restart service: pleroma"
-  service:
-   name: pleroma
-   enabled: yes
-   state: restarted
-
-- name: "Restart service: nginx"
-  service:
-   name: nginx
-   state: restarted
diff --git a/alpine/roles/pleroma/templates/pleroma-vhost.conf.j2 b/alpine/roles/pleroma/templates/pleroma-vhost.conf.j2
@@ -1,40 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{pleroma.ssl_cert}}";
-	ssl_certificate_key "{{pleroma.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{pleroma.domain}};
-
-	gzip_vary on;
-	gzip_proxied any;
-	gzip_comp_level 6;
-   	gzip_buffers 16 8k;
-	gzip_http_version 1.1;
-	gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
-
-	client_max_body_size 150m;
-	ignore_invalid_headers off;
-
-
-	location / {
-		proxy_pass http://localhost:4000/;
-
-		proxy_http_version      1.1;
-		proxy_set_header        Upgrade $http_upgrade;
-		proxy_set_header        Connection "upgrade";
-
-		proxy_redirect          off;
-		proxy_connect_timeout   90;
-		proxy_send_timeout      90;
-		proxy_read_timeout      90;
-		proxy_set_header        Host $host;
-		proxy_set_header        X-Real-IP $remote_addr;
-		proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
-		proxy_set_header        X-Forwarded-Proto $scheme;
-		proxy_set_header        X-Forwarded-Host $host;
-		proxy_set_header        X-Forwarded-Server $host;
-	}
-}
diff --git a/alpine/roles/prometheus/tasks/main.yml b/alpine/roles/prometheus/tasks/main.yml
@@ -1,42 +0,0 @@
----
-
-- name: "Install package: prometheus"
-  apk:
-   name: prometheus chartsrv
-   state: present
-   update_cache: yes
-
-- name: copy prometheus-config to destination host 
-  copy: 
-    src: config-files/prometheus/prometheus.yml
-    dest: /etc/prometheus/prometheus.yml
-
-- name: copy chartsrv-config to destination host
-  copy:
-    content: 'chartsrv_opts="http://localhost:9090"'
-    dest: /etc/conf.d/chartsrv
-
-- name: copy nginx-vhost for prometheus to destination host
-  template:
-    src: prometheus-vhost.conf.j2
-    dest: /etc/nginx/conf.d/prometheus.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Restart and enable service: prometheus"
-  service:
-   name: prometheus
-   enabled: yes
-   state: restarted
-
-- name: "Restart and enable service: chartsrv"
-  service:
-   name: chartsrv
-   enabled: yes
-   state: restarted
-
-- name: "Restart service: nginx"
-  service:
-   name: nginx
-   state: restarted
diff --git a/alpine/roles/prometheus/templates/prometheus-vhost.conf.j2 b/alpine/roles/prometheus/templates/prometheus-vhost.conf.j2
@@ -1,19 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{prometheus.ssl_cert}}";
-	ssl_certificate_key "{{prometheus.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{prometheus.domain}};
-
-
-	location / {
-		proxy_pass http://127.0.0.1:9090;
-	}
-
-	location /chart.svg {
-		proxy_pass http://127.0.0.1:8142/chart.svg;
-	}
-}
diff --git a/alpine/roles/radicale/tasks/main.yml b/alpine/roles/radicale/tasks/main.yml
@@ -1,42 +0,0 @@
----
-
-- name: "Install package: radicale"
-  apk:
-   name: radicale
-   state: present
-   update_cache: yes
-
-- name: copy radicale-config to destination host 
-  copy: 
-    src: config-files/radicale/config
-    dest: /etc/radicale/config
-    mode: 0640
-    owner: root
-    group: radicale
-
-- name: copy radicale-users to destination host
-  copy: 
-    content: "{{ radicale.users }}"
-    dest: /etc/radicale/users
-    mode: 0640
-    owner: root
-    group: radicale
-
-- name: copy nginx-vhost for radicale to destination host
-  template:
-    src: radicale-vhost.conf.j2
-    dest: /etc/nginx/conf.d/radicale.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: "Enable and restart service: radicale"
-  service:
-   name: radicale
-   enabled: yes
-   state: restarted
-
-- name: "Restart service: nginx"
-  service:
-   name: nginx
-   state: restarted
diff --git a/alpine/roles/radicale/templates/radicale-vhost.conf.j2 b/alpine/roles/radicale/templates/radicale-vhost.conf.j2
@@ -1,18 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{radicale.ssl_cert}}";
-	ssl_certificate_key "{{radicale.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{radicale.domain}};
-
-
-	location / {
-		proxy_pass       http://localhost:5232/; # The / is important!
-		proxy_set_header Host $host;
-		proxy_set_header X-Real-IP $remote_addr;
-		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-	}
-}
diff --git a/alpine/roles/rest-server/tasks/main.yml b/alpine/roles/rest-server/tasks/main.yml
@@ -1,46 +0,0 @@
----
-
-- name: Install rest-server
-  apk:
-   name: rest-server
-   state: present
-   update_cache: yes
-
-- name: create a data dir for restic server
-  file:
-   path: /var/lib/rest-server
-   state: directory
-   owner: leah
-   group: leah
-
-- name: copy rest-server service file to server
-  copy:
-    src: config-files/rest-server/rest-server.initd
-    dest: /etc/init.d/rest-server
-    mode: 0755
-
-- service:
-   name: rest-server
-   enabled: yes
-   state: restarted
-
-- name: put passwd file for rest-server
-  copy:
-    content: "{{rest_server.passwd}}"
-    dest: /etc/nginx/passwd/rest-server
-    owner: nginx
-    group: nginx
-    mode: 0700
-
-- name: copy nginx-vhost for rest-server to destination host 
-  template: 
-    src: rest-server-vhost.conf.j2
-    dest: /etc/nginx/conf.d/rest-server.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/rest-server/templates/rest-server-vhost.conf.j2 b/alpine/roles/rest-server/templates/rest-server-vhost.conf.j2
@@ -1,19 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{rest_server.ssl_cert}}";
-	ssl_certificate_key "{{rest_server.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{rest_server.domain}};
-
-	auth_basic           "hello";
-	auth_basic_user_file /etc/nginx/passwd/rest-server; 
-
-	location / {
-		proxy_pass http://127.0.0.1:8060/;
-		client_max_body_size 500M;
-		include /etc/nginx/proxy.conf;
-	}
-}
diff --git a/alpine/roles/synapse/tasks/main.yml b/alpine/roles/synapse/tasks/main.yml
@@ -1,51 +0,0 @@
----
-
-- name: Install synapse
-  apk:
-   name: synapse riot-web
-   state: present
-   update_cache: yes
-
-- name: copy configs to destination host
-  copy:
-    src: "config-files/synapse/{{ item }}"
-    dest: "/etc/synapse/{{ item }}"
-    mode: 0755
-    owner: synapse
-    group: synapse
-  loop:
-    - homeserver.yaml
-    - log.yaml
-
-- name: copy riot-web config to destination host
-  copy:
-    src: "config-files/riot-web/config.json"
-    dest: "/etc/riot-web/config.json"
-    mode: 0644
-
-- name: create log directory
-  file: 
-    path: "/var/log/synapse"
-    state: directory
-    mode: 0755
-    owner: synapse
-    group: synapse
-
-- name: copy nginx-vhost for synapse to destination host 
-  template: 
-    src: synapse-vhost.conf.j2
-    dest: /etc/nginx/conf.d/synapse.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: Enable and start synapse
-  service:
-   name: synapse
-   enabled: yes
-   state: restarted
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/synapse/templates/synapse-vhost.conf.j2 b/alpine/roles/synapse/templates/synapse-vhost.conf.j2
@@ -1,20 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{synapse.ssl_cert}}";
-	ssl_certificate_key "{{synapse.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{synapse.domain}};
-
-	location /_matrix {
-		proxy_pass http://127.0.0.1:8008;
-		proxy_set_header X-Forwarded-For $remote_addr;
-		client_max_body_size 100M;
-	}
-
-	location / {
-		root /usr/share/webapps/riot-web;
-	}
-}
diff --git a/alpine/roles/syncthing/tasks/main.yml b/alpine/roles/syncthing/tasks/main.yml
@@ -1,46 +0,0 @@
----
-
-- name: Install syncthing
-  apk:
-   name: syncthing
-   state: present
-   update_cache: yes
-
-- name: copy initd to destination host 
-  template: 
-    src: syncthing-initd.j2
-    dest: /etc/init.d/syncthing-leah
-    mode: 0755
-
-- name: Copy syncthing firewall-rule to destination host
-  copy:
-    src: config-files/awall/syncthing.json
-    dest: /etc/awall/optional/syncthing.json
-    validate: jq '.' %s
-
-- name: copy nginx-vhost for syncthing to destination host
-  template:
-    src: syncthing-vhost.conf.j2
-    dest: /etc/nginx/conf.d/syncthing.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-  when: syncthing.disableReverseProxy is not defined
-
-- name: enable firewall-rules for syncthing
-  awall:
-   name: syncthing
-   state: enabled
-   activate: yes
-
-- name: restart syncthing
-  service:
-   name: syncthing-leah
-   enabled: yes
-   state: restarted
-
-- name: restart nginx
-  service:
-   name: nginx
-   state: restarted
-  when: syncthing.disableReverseProxy is not defined
diff --git a/alpine/roles/syncthing/templates/syncthing-initd.j2 b/alpine/roles/syncthing/templates/syncthing-initd.j2
@@ -1,24 +0,0 @@
-#!/sbin/openrc-run
-
-name=$RC_SVCNAME
-command=/usr/bin/syncthing
-{% if syncthing.guiAddress is defined %}
-command_args="-no-browser -gui-address={{ syncthing.guiAddress }}"
-{% else %}
-command_args="-no-browser"
-{% endif %}
-command_user="leah:leah"
-pidfile=/run/${RC_SVCNAME}.pid
-command_background=yes
-start_stop_daemon_args="--stdout /var/log/$RC_SVCNAME/${RC_SVCNAME}.log --stderr /var/log/$RC_SVCNAME/${RC_SVCNAME}.log"
-
-depend() {
-        use logger dns
-        need net
-        after firewall
-}
-
-start_pre() {
-        checkpath --directory --owner $command_user --mode 0775 \
-                /var/log/$RC_SVCNAME
-}
diff --git a/alpine/roles/syncthing/templates/syncthing-vhost.conf.j2 b/alpine/roles/syncthing/templates/syncthing-vhost.conf.j2
@@ -1,16 +0,0 @@
-server {
-	listen 443 ssl;
-	listen [::]:443 ssl;
-
-	ssl_certificate "{{syncthing.ssl_cert}}";
-	ssl_certificate_key "{{syncthing.ssl_privkey}}";
-	include /etc/nginx/ssl.conf;
-	
-	server_name {{syncthing.domain}};
-
-
-	location / {
-		proxy_pass       http://localhost:8384/;
-		proxy_set_header Host localhost;
-	}
-}
diff --git a/alpine/roles/websites/tasks/ctu.cx.yml b/alpine/roles/websites/tasks/ctu.cx.yml
@@ -1,21 +0,0 @@
----
-
-- name: create root directroy for ctu.cx
-  file:
-    path: /var/lib/websites/ctu.cx
-    state: directory
-    owner: leah
-    group: nginx
-
-- name: copy vhost for ctu.cx into place
-  copy:
-    src: config-files/website-vhosts/ctu.cx.conf
-    dest: /etc/nginx/conf.d/ctu.cx.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/websites/tasks/main.yml b/alpine/roles/websites/tasks/main.yml
@@ -1,15 +0,0 @@
----
-- include: ctu.cx.yml
-  tags:
-    - install_ctu.cx
-  when: network.hostname == "wanderduene"
-
-- include: repo.f2k1.de.yml
-  tags:
-    - install_repo.f2k1.de
-  when: network.hostname == "wanderduene"
-
-- include: photos.ctu.cx.yml
-  tags:
-    - install_photos.ctu.cx
-  when: network.hostname == "taurus"
diff --git a/alpine/roles/websites/tasks/photos.ctu.cx.yml b/alpine/roles/websites/tasks/photos.ctu.cx.yml
@@ -1,21 +0,0 @@
----
-
-- name: create root directroy for photos.ctu.cx
-  file:
-    path: /var/lib/websites/photos.ctu.cx
-    state: directory
-    owner: leah
-    group: nginx
-
-- name: copy vhost for photos.ctu.cx into place
-  copy:
-    src: config-files/website-vhosts/photos.ctu.cx.conf
-    dest: /etc/nginx/conf.d/photos.ctu.cx.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/roles/websites/tasks/repo.f2k1.de.yml b/alpine/roles/websites/tasks/repo.f2k1.de.yml
@@ -1,14 +0,0 @@
----
-
-- name: copy vhost for repo.f2k1.de into place
-  copy:
-    src: config-files/website-vhosts/repo.f2k1.de.conf
-    dest: /etc/nginx/conf.d/repo.f2k1.de.conf
-    mode: 0644
-    owner: nginx
-    group: nginx
-
-- name: restart nginx
-  service:
-    name: nginx
-    state: restarted
diff --git a/alpine/scripts/restic-backup-wanderduene.sh b/alpine/scripts/restic-backup-wanderduene.sh
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-
-#backup services
-for service in pleroma radicale synapse git maddy oeffisearch
-do
-  sudo -u $service restic init --password-file /var/lib/$service/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-$service
-  sudo -u $service restic backup --password-file /var/lib/$service/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-$service /var/lib/$service
-done
-
-#backup websites
-sudo -u leah restic init --password-file /var/lib/websites/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-websites
-sudo -u leah restic backup --password-file /var/lib/websites/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-websites /var/lib/websites
-
-#backup postgres
-SQLFILE=/var/lib/postgresql/backup/postgres_$(date "+%Y-%m-%d_%H:%M").sql
-sudo -u postgres mkdir /var/lib/postgresql/backup
-sudo -u postgres bash -c "pg_dumpall > $SQLFILE"
-sudo -u postgres restic init --password-file /var/lib/postgresql/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-postgres
-sudo -u postgres restic backup --password-file /var/lib/postgresql/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-postgres /var/lib/postgresql/backup
-sudo -u postgres rm -rf /var/lib/postgresql/backup
diff --git a/ansible.cfg b/ansible.cfg
@@ -0,0 +1,11 @@
+[defaults]
+roles_path = roles
+inventory = inventory
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = .ansible-cache
+fact_caching_timeout = 86400
+nocows = 1
+
+[ssh_connection]
+pipelining = True+
\ No newline at end of file
diff --git a/arch/config-files/common/pacman.conf.patch b/arch/config-files/common/pacman.conf.patch
@@ -1,12 +0,0 @@
---- /etc/pacman.conf	2020-07-01 03:52:38.000000000 +0200
-+++ pacman.conf	2020-12-07 14:18:19.773580876 +0100
-@@ -83,6 +83,10 @@
- [community]
- Include = /etc/pacman.d/mirrorlist
-
-+[aurto]
-+Server = https://repo.f2k1.de
-+SigLevel = Optional TrustAll
-+
- # If you want to run 32 bit applications on your x86_64 system,
- # enable the multilib repositories as required here.-
\ No newline at end of file
diff --git a/arch/config-files/ferm/ferm-lollo.conf b/arch/config-files/ferm/ferm-lollo.conf
@@ -1,104 +0,0 @@
-# -*- shell-script -*-
-#
-# Ferm example script
-#
-# Firewall configuration for a router with a dynamic IP.
-#
-# Author: Max Kellermann <max@duempel.org>
-#
-
-@def $DEV_LAN = brlan;
-@def $DEV_WAN = enp2s0;
-
-@def $NET_LAN = 10.0.0.0/24;
-
-# globally accessible services
-@def $WAN_TCP = ( 22 );
-@def $WAN_UDP = ( 1194 );
-# ( ssh )
-# ( wireguard )
-
-# locally accessible services
-@def $LAN_TCP = ( 53 22 );
-@def $LAN_UDP = ( 53 67 69 123 );
-# ( dns ssh )
-# ( dns dhcp tftp ntp )
-
-# generic input and forwarding rules for ipv4 and ipv6
-domain (ip ip6) {
-    table filter {
-        chain INPUT {
-            policy DROP;
-
-            # connection tracking
-            mod state state INVALID DROP;
-            mod state state (ESTABLISHED RELATED) ACCEPT;
-
-            # allow local connections
-            interface lo ACCEPT;
-            interface $DEV_LAN ACCEPT;
-            interface wg-pbb ACCEPT;
-
-            # respond to ping
-            proto icmp ACCEPT;
-
-            # local services
-            interface ! $DEV_WAN {
-                proto tcp dport $LAN_TCP ACCEPT;
-                proto udp mod multiport destination-ports $LAN_UDP ACCEPT;
-            }
-
-            proto tcp dport $WAN_TCP ACCEPT;
-            proto udp dport $WAN_UDP ACCEPT;
-        }
-
-        # outgoing connections are not limited
-        chain OUTPUT policy ACCEPT;
-
-        chain FORWARD {
-            policy DROP;
-
-            # connection tracking
-            mod state state INVALID DROP;
-            mod state state (ESTABLISHED RELATED) ACCEPT;
-
-            # local clients can do whatever
-            interface $DEV_LAN ACCEPT;
-
-
-            proto icmp ACCEPT;
-
-            mod conntrack ctstate DNAT ACCEPT;
-
-            # the rest is dropped by the above policy
-        }
-    }
-}
-
-# nat only for ipv4
-domain ip {
-    table nat {
-        chain PREROUTING {
-            policy ACCEPT;
-
-            # port forwards, ala daddr $WAN_IP dport 65522 DNAT to 192.168.0.2:22;
-        }
-
-        chain POSTROUTING {
-            policy ACCEPT;
-
-            outerface $DEV_WAN MASQUERADE;
-            saddr $NET_LAN mod conntrack ctstate DNAT MASQUERADE; # needle point loopback
-        }
-    }
-}
-
-domain ip6 {
-    table filter {
-        chain INPUT {
-            proto ipv6-icmp icmpv6-type redirect DROP;
-            proto ipv6-icmp icmpv6-type 139 DROP;
-            proto ipv6-icmp ACCEPT;
-        }
-    }
-}-
\ No newline at end of file
diff --git a/arch/inventory b/arch/inventory
@@ -1,5 +0,0 @@
-[all:vars]
-ansible_ssh_user=root
-
-[lollo]
-192.168.178.116-
\ No newline at end of file
diff --git a/arch/playbook.yml b/arch/playbook.yml
@@ -1,182 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  gather_facts: false
-  tasks:
-    - name: Install Python
-      raw: test -e /usr/bin/python || pacman -Sy --noconfirm python
-
-- hosts: lollo
-  name: Install lollo
-  roles:
-    - common
-    - kawaidesu.ansible_networkd
-    - hostapd
-    - dnsmasq
-#    - syncthing
-#    - frp
-  vars:
-    users:
-      - name: leah
-        groups: "wheel"
-        password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
-        sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
-
-    network:
-      hostname: lollo
-      domain: ctu.cx
-      ip_forwarding: true
-      useFerm: true
-
-    networkd:
-      networkd_resolv_conf_content:
-        - nameserver 1.1.1.1
-        - nameserver 8.8.8.8
-      networkd_apply_action: "restart"
-      netdev:
-        - name: enp2s0.5
-          priority: 20
-          content:
-            - NetDev:
-              - Name: enp2s0.5
-              - Kind: vlan
-            - VLAN:
-              - Id: 5
-        - name: wg-pbb
-          priority: 30
-          content:
-            - NetDev:
-              - Name: wg-pbb
-              - Kind: wireguard
-            - WireGuard:
-              - PrivateKey: "{{ lookup('community.general.passwordstore', 'server/lollo/wireguard.privkey returnall=true') }}"
-              - FirewallMark: 51820
-            - WireGuardPeer:
-              - PublicKey: "{{ lookup('community.general.passwordstore', 'server/desastro/wireguard.pubkey returnall=true') }}"
-              - AllowedIPs:  "0.0.0.0/0, ::/0"
-              - Endpoint: "desastro.ctu.cx:51820"
-              - PersistentKeepalive: 25
-        - name: brlan
-          priority: 40 
-          content:
-            - NetDev:
-              - Name: brlan
-              - Kind: bridge
-      network:
-        - name: enp2s0
-          priority: 20
-          content:
-            - Match:
-              - Name: enp2s0
-            - Network:
-              - DHCP: yes
-              - VLAN: enp2s0.5
-        - name: enp2s0.5
-          priority: 20
-          content:
-            - Match:
-              - Name: enp2s0.5
-            - Network:
-              - Bridge: brlan
-        - name: wg-pbb
-          priority: 30
-          content:
-            - Match:
-              - Name: wg-pbb
-            - Network:
-              - Address: 195.39.246.32/32
-              - Address: 2a0f:4ac0:acab::1/128
-            - RoutingPolicyRule:
-              - FirewallMark: 51820
-              - Family: both
-              - Priority: 1000
-            - RoutingPolicyRule:
-              - FirewallMark: 51820
-              - Table: 2342
-              - Family: both
-              - Priority: 1500
-            - Route:
-              - Destination: 0.0.0.0/0
-              - Table: 1234
-            - Route:
-              - Destination: ::/0
-              - Table: 1234
-            - Route:
-              - Destination: 0.0.0.0/0
-              - Table: 2342
-              - Type: unreachable
-            - Route:
-              - Destination: ::/0
-              - Table: 2342
-              - Type: unreachable
-        - name: brlan
-          priority: 40
-          content:
-            - Match:
-              - Name: brlan
-              - Driver: bridge
-            - Network:
-              - DHCP: no
-              - Address: 195.39.246.32/28
-              - Address: 10.0.0.1/24
-              - Address: 2a0f:4ac0:acab::1/48
-            - RoutingPolicyRule:
-              - From: 195.39.246.32/28
-              - Table: 1234
-              - Priority: 2000
-            - RoutingPolicyRule:
-              - From: 2a0f:4ac0:acab::/48
-              - Table: 1234
-              - Priority: 2000
-
-    hostapd:
-      interface: wlp3s0
-      bridge: brlan
-      channel: 1
-      ssid: legacy.home.ctu.cx
-      passphrase: "{{ lookup('community.general.passwordstore', 'WiFi/legacy.home.ctu.cx returnall=true')}}"
-
-    dnsmasq:
-      wan_interface: enp2s0
-      local_service: true
-      no_resolv: true
-      domain_needed: true
-      bogus_priv: true
-      expand_hosts: true
-      read_ethers: true
-      enable_ra: true
-      quiet_ra: true
-      domain: home.ctu.cx
-      dns_servers:
-        - 1.1.1.1
-        - 1.0.0.1
-        - 8.8.8.8
-        - 8.8.4.4
-      dhcp:
-        authoritative: true
-        rapid_commit: true
-        sequential_ip: true
-        options:
-          - option6:information-refresh-time,6h
-          - option:dns-server,10.0.0.1
-          - option:router,10.0.0.1
-        ranges:
-          - 195.39.246.33, 195.39.246.42, 255.255.255.240, 48h
-          - 10.0.0.40,     10.0.0.253,    255.255.255.0,   48h
-          - 2a0f:4ac0:acab::, ra-names, 48h
-
-    syncthing:
-      disableReverseProxy: true
-      guiAddress: 0.0.0.0:8384
-
-    frpc:
-      serverAddress: wanderduene.ctu.cx
-      serverPort: 5050
-      token: "{{ lookup('community.general.passwordstore', 'server/wanderduene/frps/token returnall=true')}}"
-      dashboard: false
-      tunnels:
-        - name: lollo-ssh
-          type: tcp
-          local_ip: 127.0.0.1
-          local_port: 22
-          remote_port: 2202-
\ No newline at end of file
diff --git a/arch/roles/common/tasks/firewall.yml b/arch/roles/common/tasks/firewall.yml
@@ -1,19 +0,0 @@
----
-
-- name: "Install package: ferm"
-  pacman:
-    name: ferm
-    state: present
-    update_cache: yes
-
-- name: copy ferm config to destination
-  copy:
-    src: "config-files/ferm/ferm-{{ network.hostname }}.conf"
-    dest: /etc/ferm.conf
-    mode: 0644
-
-- name: "Start and enable service: ferm"
-  systemd:
-   name: ferm
-   enabled: yes
-   state: started-
\ No newline at end of file
diff --git a/arch/roles/common/tasks/ip-forwarding.yml b/arch/roles/common/tasks/ip-forwarding.yml
@@ -1,37 +0,0 @@
----
-
-- name: Enable IPv4 forwarding
-  ansible.posix.sysctl:
-    name: net.ipv4.ip_forward
-    value: '1'
-    sysctl_set: yes
-    state: present
-    reload: yes
-  when: network.ip_forwarding is true
-
-- name: Disable IPv6 forwarding
-  ansible.posix.sysctl:
-    name: net.ipv6.conf.all.forwarding
-    value: '1'
-    sysctl_set: yes
-    state: present
-    reload: yes
-  when: network.ip_forwarding is true
-
-- name: Disable IPv4 forwarding
-  ansible.posix.sysctl:
-    name: net.ipv4.ip_forward
-    value: '0'
-    sysctl_set: yes
-    state: present
-    reload: yes
-  when: network.ip_forwarding is false
-
-- name: Disable IPv6 forwarding
-  ansible.posix.sysctl:
-    name: net.ipv6.conf.all.forwarding
-    value: '0'
-    sysctl_set: yes
-    state: present
-    reload: yes
-  when: network.ip_forwarding is false-
\ No newline at end of file
diff --git a/arch/roles/common/tasks/main.yml b/arch/roles/common/tasks/main.yml
@@ -1,19 +0,0 @@
----
-
-- include: packages.yml
-
-- include: sudo.yml
-
-- include: sshd.yml
-
-- include: users.yml
-
-- include: network.yml
-
-- include: ip-forwarding.yml
-  when: network.ip_forwarding is defined 
-
-- include: firewall.yml
-  when: network.useFerm is defined
-
-- include: node-exporter.yml
diff --git a/arch/roles/common/tasks/network.yml b/arch/roles/common/tasks/network.yml
@@ -1,19 +0,0 @@
----
-
-- name: "create file: /etc/hostname"
-  copy:
-    content: "{{network.hostname}}"
-    dest: /etc/hostname
-  register: hostname
-
-- name: Change hostname of running system
-  hostname:
-    name: "{{network.hostname}}"
-    use: systemd
-  when: hostname.changed
-
-- name: "Start and enable service: systemd-networkd"
-  systemd:
-    name: systemd-networkd
-    state: started
-    enabled: yes
diff --git a/arch/roles/common/tasks/node-exporter.yml b/arch/roles/common/tasks/node-exporter.yml
@@ -1,13 +0,0 @@
----
-
-- name: "Install package: prometheus-node-exporter"
-  pacman:
-    name: prometheus-node-exporter
-    state: present
-    update_cache: yes
-
-- name: "Start and enable service: prometheus-node-exporter"
-  systemd:
-    name: prometheus-node-exporter
-    state: started
-    enabled: yes
diff --git a/arch/roles/common/tasks/packages.yml b/arch/roles/common/tasks/packages.yml
@@ -1,33 +0,0 @@
----
-
-- name: "Install package: patch"
-  pacman:
-    name: patch
-    update_cache: yes
-
-- name: "Patch file: /etc/pacman.conf (add isas aur-repo)"
-  ansible.posix.patch:
-    src: config-files/common/pacman.conf.patch
-    dest: /etc/pacman.conf
-
-- name: Upgrade system
-  pacman:
-    update_cache: yes
-    upgrade: yes
-
-- name: Install common packages
-  pacman:
-    name:
-      - nano
-      - micro
-      - sudo
-      - htop
-      - tar
-      - unzip
-      - curl 
-      - wget
-      - tmux
-      - git
-      - jq
-      - restic
-    update_cache: yes-
\ No newline at end of file
diff --git a/arch/roles/common/tasks/sshd.yml b/arch/roles/common/tasks/sshd.yml
@@ -1,13 +0,0 @@
----
-
-- name: "Install package: openssh"
-  pacman:
-    name: openssh
-    state: present
-    update_cache: yes
-
-- name: "Start and enable service: sshd"
-  systemd:
-    name: sshd
-    state: started
-    enabled: yes
diff --git a/arch/roles/common/tasks/sudo.yml b/arch/roles/common/tasks/sudo.yml
@@ -1,7 +0,0 @@
----
-
-- name: "Install/Upgrade package: sudo "
-  pacman:
-    name: sudo
-    state: present
-    update_cache: yes-
\ No newline at end of file
diff --git a/arch/roles/common/tasks/users.yml b/arch/roles/common/tasks/users.yml
@@ -1,34 +0,0 @@
----
-
-- name: "Add groups" 
-  group:
-    name: "{{item.name}}"
-    state: present
-  loop: "{{ users }}"
-
-- name: "Add users" 
-  user:
-    append: yes
-    name: "{{item.name}}"
-    group: "{{item.name}}"
-    groups: "{{item.groups}}"
-    password: "{{item.password}}"
-  loop: "{{ users }}"
-
-- name: "Create ~/.ssh directory for users"
-  file:
-    state: directory
-    dest: "/home/{{item.name}}/.ssh/"
-    mode: 0755
-    owner: "{{item.name}}"
-    group: "{{item.name}}"
-  loop: "{{ users }}"
-
-- name: "Place ssh-key for users"
-  copy:
-    content: "{{item.sshKey}}"
-    dest: "/home/{{item.name}}/.ssh/authorized_keys"
-    mode: 0644    
-    owner: "{{item.name}}"
-    group: "{{item.name}}"
-  loop: "{{ users }}"
diff --git a/arch/roles/dnsmasq/tasks/main.yml b/arch/roles/dnsmasq/tasks/main.yml
@@ -1,18 +0,0 @@
----
-
-- name: "Install package: dnsmasq" 
-  pacman:
-   name: dnsmasq 
-   state: present
-   update_cache: yes
-
-- name: "create file: /etc/dnsmasq.d/ansible.conf"
-  template:
-    src: dnsmasq.conf.j2
-    dest: /etc/dnsmasq.conf
-
-- name: "Enable and restart service: dnsmasq"
-  systemd:
-   name: dnsmasq
-   enabled: yes
-   state: restarted
diff --git a/arch/roles/dnsmasq/templates/dnsmasq.conf.j2 b/arch/roles/dnsmasq/templates/dnsmasq.conf.j2
@@ -1,69 +0,0 @@
-{% if dnsmasq.local_service is defined and dnsmasq.local_service is true  %}
-local-service
-{% endif %}
-
-{% if dnsmasq.no_resolv is defined and dnsmasq.no_resolv is true  %}
-no-resolv
-{% endif %}
-
-{% if dnsmasq.domain_needed is defined and dnsmasq.domain_needed is true  %}
-domain-needed
-{% endif %}
-
-{% if dnsmasq.bogus_priv is defined and dnsmasq.bogus_priv is true  %}
-bogus-priv
-{% endif %}
-
-{% if dnsmasq.expand_hosts is defined and dnsmasq.expand_hosts is true  %}
-expand-hosts
-{% endif %}
-
-{% if dnsmasq.read_ethers is defined and dnsmasq.read_ethers is true  %}
-read-ethers
-{% endif %}
-
-{% if dnsmasq.enable_ra is defined and dnsmasq.enable_ra is true  %}
-enable-ra
-{% endif %}
-
-{% if dnsmasq.quiet_ra is defined and dnsmasq.quiet_ra is true  %}
-quiet-ra
-{% endif %}
-
-{% for dns_server in dnsmasq.dns_servers %}
-server={{ dns_server }}@{{ dnsmasq.wan_interface }}
-{% endfor %}
-
-{% if dnsmasq.domain is defined %}
-# allow /etc/hosts and dhcp lookups for local domains
-local=/{{ dnsmasq.domain }}/
-domain={{ dnsmasq.domain }}
-{% endif %}
-
-
-{% if dnsmasq.dhcp is defined %}
-{% if dnsmasq.dhcp.authoritative is defined and dnsmasq.dhcp.authoritative is true  %}
-dhcp-authoritative
-{% endif %}
-
-{% if dnsmasq.dhcp.rapid_commit is defined and dnsmasq.dhcp.rapid_commit is true  %}
-dhcp-rapid-commit
-{% endif %}
-
-{% if dnsmasq.dhcp.sequential_ip is defined and dnsmasq.dhcp.sequential_ip is true  %}
-dhcp-sequential-ip
-{% endif %}
-
-{% if dnsmasq.dhcp.ranges is defined %}
-{% for dhcp_range in dnsmasq.dhcp.ranges %}
-dhcp-range={{ dhcp_range }}
-{% endfor %}
-{% endif %}
-
-{% if dnsmasq.dhcp.options is defined %}
-{% for dhcp_option in dnsmasq.dhcp.options %}
-dhcp-option={{ dhcp_option}}
-{% endfor %}
-{% endif %}
-
-{% endif %}-
\ No newline at end of file
diff --git a/arch/roles/hostapd/tasks/main.yml b/arch/roles/hostapd/tasks/main.yml
@@ -1,18 +0,0 @@
----
-
-- name: "Install package: hostapd" 
-  pacman:
-   name: hostapd 
-   state: present
-   update_cache: yes
-
-- name: "create file: /etc/hostapd/hostapd.conf"
-  template:
-    src: hostapd.conf.j2
-    dest: /etc/hostapd/hostapd.conf
-
-- name: "Enable and restart service: hostapd"
-  systemd:
-   name: hostapd
-   enabled: yes
-   state: started
diff --git a/arch/roles/hostapd/templates/hostapd.conf.j2 b/arch/roles/hostapd/templates/hostapd.conf.j2
@@ -1,23 +0,0 @@
-interface={{hostapd.interface}}
-bridge={{hostapd.bridge}}
-
-ssid={{hostapd.ssid}}
-driver=nl80211
-country_code=DE
-
-hw_mode=g
-channel={{hostapd.channel}}
-
-wpa=2
-auth_algs=1
-
-rsn_pairwise=CCMP
-wpa_key_mgmt=WPA-PSK
-wpa_passphrase={{hostapd.passphrase}}
-
-logger_stdout=-1
-logger_stdout_level=2
-
-ieee80211n=1
-wmm_enabled=1
-ht_capab=[HT40+]
diff --git a/alpine/config-files/cgit/cgit.css b/config-files/cgit/cgit.css
diff --git a/config-files/cgit/cgitrc b/config-files/cgit/cgitrc
@@ -0,0 +1,59 @@
+css=/custom-cgit.css
+logo=/cgit.png
+virtual-root=/
+
+root-title=ctucx.cgit
+root-desc=my personal git repos
+
+local-time=1
+
+cache-size=30
+
+readme=:README.md
+readme=:readme.md
+readme=:README.mkd
+readme=:readme.mkd
+readme=:README.rst
+readme=:readme.rst
+readme=:README.html
+readme=:readme.html
+readme=:README.htm
+readme=:readme.htm
+readme=:README.txt
+readme=:readme.txt
+readme=:README
+readme=:readme
+
+about-filter=/usr/lib/cgit/filters/about-formatting.sh
+source-filter=/usr/lib/cgit/filters/syntax-highlighting.py
+
+snapshots=tar.gz tar.bz2 zip
+
+max-stats=quarter
+
+clone-url=http://cgit.ctu.cx/$CGIT_REPO_URL git@wanderduene.ctu.cx:$CGIT_REPO_URL
+
+enable-commit-graph=1
+
+enable-index-links=1
+enable-index-owner=0
+
+enable-blame=1
+
+enable-log-filecount=1
+enable-log-linecount=1
+
+enable-http-clone=1
+enable-git-config=1
+
+mimetype.gif=image/gif
+mimetype.html=text/html
+mimetype.jpg=image/jpeg
+mimetype.jpeg=image/jpeg
+mimetype.pdf=application/pdf
+mimetype.png=image/png
+mimetype.svg=image/svg+xml
+
+remove-suffix=1
+project-list=/var/lib/git/projects.list
+scan-path=/var/lib/git/repositories
diff --git a/config-files/ferm/lollo.conf b/config-files/ferm/lollo.conf
@@ -0,0 +1,105 @@
+# -*- shell-script -*-
+#
+# Ferm example script
+#
+# Firewall configuration for a router with a dynamic IP.
+#
+# Author: Max Kellermann <max@duempel.org>
+#
+
+@def $DEV_LAN = brlan;
+@def $DEV_WAN = enp2s0;
+
+@def $NET_LAN = 10.0.0.0/24;
+
+# globally accessible services
+@def $WAN_TCP = ( 22 80 443 1234 22000 );
+@def $WAN_UDP = ( 1194 21027 );
+# ( ssh )
+# ( wireguard )
+
+# locally accessible services
+@def $LAN_TCP = ( 53 22 80 443 );
+@def $LAN_UDP = ( 53 67 );
+# ( dns ssh )
+# ( dns dhcp tftp ntp )
+
+# generic input and forwarding rules for ipv4 and ipv6
+domain (ip ip6) {
+    table filter {
+        chain INPUT {
+            policy DROP;
+
+            # connection tracking
+            mod state state INVALID DROP;
+            mod state state (ESTABLISHED RELATED) ACCEPT;
+
+            # allow local connections
+            interface lo ACCEPT;
+            interface $DEV_LAN ACCEPT;
+
+            # respond to ping
+            proto icmp ACCEPT;
+
+            # local services
+            interface $DEV_LAN {
+                proto tcp dport $LAN_TCP ACCEPT;
+                proto udp mod multiport destination-ports $LAN_UDP ACCEPT;
+            }
+
+            proto tcp dport $WAN_TCP ACCEPT;
+            proto udp dport $WAN_UDP ACCEPT;
+            daddr 195.39.246.33/29 ACCEPT;
+            daddr 195.39.246.40/29 ACCEPT;
+        }
+
+        # outgoing connections are not limited
+        chain OUTPUT policy ACCEPT;
+
+        chain FORWARD {
+            policy DROP;
+
+            # connection tracking
+            mod state state INVALID DROP;
+            mod state state (ESTABLISHED RELATED) ACCEPT;
+
+            # local clients can do whatever
+            interface $DEV_LAN ACCEPT;
+
+
+            proto icmp ACCEPT;
+
+            mod conntrack ctstate DNAT ACCEPT;
+
+            # the rest is dropped by the above policy
+        }
+    }
+}
+
+# nat only for ipv4
+domain ip {
+    table nat {
+        chain PREROUTING {
+            policy ACCEPT;
+
+            # port forwards, ala daddr $WAN_IP dport 65522 DNAT to 192.168.0.2:22;
+        }
+
+        chain POSTROUTING {
+            policy ACCEPT;
+
+            outerface $DEV_WAN MASQUERADE;
+            saddr $NET_LAN mod conntrack ctstate DNAT MASQUERADE; # needle point loopback
+        }
+    }
+}
+
+domain ip6 {
+    table filter {
+        chain INPUT {
+            proto ipv6-icmp icmpv6-type redirect DROP;
+            proto ipv6-icmp icmpv6-type 139 DROP;
+            proto ipv6-icmp ACCEPT;
+        }
+    }
+}+
\ No newline at end of file
diff --git a/config-files/grafana/grafana.ini b/config-files/grafana/grafana.ini
@@ -0,0 +1,547 @@
+##################### Grafana Configuration Example #####################
+#
+# Everything has defaults so you only need to uncomment things you want to
+# change
+
+# possible values : production, development
+app_mode = production
+
+# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+instance_name = ctucx.grafana
+
+#################################### Paths ####################################
+[paths]
+# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+;data = /var/lib/grafana
+
+# Temporary files in `data` directory older than given duration will be removed
+;temp_data_lifetime = 24h
+
+# Directory where grafana can store logs
+;logs = /var/log/grafana
+
+# Directory where grafana will automatically scan and look for plugins
+;plugins = /var/lib/grafana/plugins
+
+# folder that contains provisioning config files that grafana will apply on startup and while running.
+;provisioning = /etc/grafana/provisioning
+
+#################################### Server ####################################
+[server]
+# Protocol (http, https, h2, socket)
+;protocol = http
+
+# The ip address to bind to, empty will bind to all interfaces
+http_addr = 127.0.0.1
+
+# The http port  to use
+;http_port = 3000
+
+# The public facing domain name used to access grafana from a browser
+domain = grafana.ctu.cx
+
+# Redirect to correct domain if host header does not match domain
+# Prevents DNS rebinding attacks
+enforce_domain = false
+
+# The full public facing url you use in browser, used for redirects and emails
+# If you use reverse proxy and sub path specify full url (with sub path)
+;root_url = https://grafana.ctu.cx
+
+# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
+;serve_from_sub_path = false
+
+# Log web requests
+;router_logging = false
+
+# the path relative working path
+;static_root_path = public
+
+# enable gzip
+;enable_gzip = false
+
+# https certs & key file
+;cert_file =
+;cert_key =
+
+# Unix socket path
+;socket =
+
+#################################### Database ####################################
+[database]
+# You can configure the database connection by specifying type, host, name, user and password
+# as separate properties or as on string using the url properties.
+
+# Either "mysql", "postgres" or "sqlite3", it's your choice
+;type = sqlite3
+;host = 127.0.0.1:3306
+;name = grafana
+;user = root
+# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+;password =
+
+# Use either URL or the previous fields to configure the database
+# Example: mysql://user:secret@host:port/database
+;url =
+
+# For "postgres" only, either "disable", "require" or "verify-full"
+;ssl_mode = disable
+
+;ca_cert_path =
+;client_key_path =
+;client_cert_path =
+;server_cert_name =
+
+# For "sqlite3" only, path relative to data_path setting
+;path = grafana.db
+
+# Max idle conn setting default is 2
+;max_idle_conn = 2
+
+# Max conn setting default is 0 (mean not set)
+;max_open_conn =
+
+# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
+;conn_max_lifetime = 14400
+
+# Set to true to log the sql calls and execution times.
+;log_queries =
+
+# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
+;cache_mode = private
+
+#################################### Cache server #############################
+[remote_cache]
+# Either "redis", "memcached" or "database" default is "database"
+;type = database
+
+# cache connectionstring options
+# database: will use Grafana primary database.
+# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
+# memcache: 127.0.0.1:11211
+;connstr =
+
+#################################### Data proxy ###########################
+[dataproxy]
+
+# This enables data proxy logging, default is false
+;logging = false
+
+# How long the data proxy waits before timing out, default is 30 seconds.
+# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
+;timeout = 30
+
+# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
+;send_user_header = false
+
+#################################### Analytics ####################################
+[analytics]
+# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+# No ip addresses are being tracked, only simple counters to track
+# running instances, dashboard and error counts. It is very helpful to us.
+# Change this option to false to disable reporting.
+reporting_enabled = false
+
+# Set to false to disable all checks to https://grafana.net
+# for new versions (grafana itself and plugins), check is used
+# in some UI views to notify that grafana or plugin update exists
+# This option does not cause any auto updates, nor send any information
+# only a GET request to http://grafana.com to get latest versions
+check_for_updates = false
+
+
+#################################### Security ####################################
+[security]
+disable_initial_admin_creation = true
+disable_gravatar = true
+cookie_secure = true
+
+# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
+;cookie_samesite = lax
+
+# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
+;allow_embedding = false
+
+# Set to true if you want to enable http strict transport security (HSTS) response header.
+# This is only sent when HTTPS is enabled in this configuration.
+# HSTS tells browsers that the site should only be accessed using HTTPS.
+;strict_transport_security = false
+
+# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
+;strict_transport_security_max_age_seconds = 86400
+
+# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
+;strict_transport_security_preload = false
+
+# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
+;strict_transport_security_subdomains = false
+
+# Set to true to enable the X-Content-Type-Options response header.
+# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
+# in the Content-Type headers should not be changed and be followed.
+;x_content_type_options = true
+
+# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
+# when they detect reflected cross-site scripting (XSS) attacks.
+;x_xss_protection = true
+
+#################################### Snapshots ###########################
+[snapshots]
+# snapshot sharing options
+;external_enabled = true
+;external_snapshot_url = https://snapshots-origin.raintank.io
+;external_snapshot_name = Publish to snapshot.raintank.io
+
+# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
+# creating and deleting snapshots.
+;public_mode = false
+
+# remove expired snapshot
+;snapshot_remove_expired = true
+
+#################################### Dashboards History ##################
+[dashboards]
+# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
+;versions_to_keep = 20
+
+# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
+# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
+;min_refresh_interval = 5s
+
+# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
+default_home_dashboard_path = /var/lib/grafana/provisioning/dashboards/home.json
+
+#################################### Users ###############################
+[users]
+allow_sign_up = false
+allow_org_create = false
+default_theme = dark
+viewers_can_edit = true
+disable_login_form = true
+
+# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
+;disable_signout_menu = false
+
+# URL to redirect the user to after sign out
+;signout_redirect_url =
+
+# Set to true to attempt login with OAuth automatically, skipping the login screen.
+# This setting is ignored if multiple OAuth providers are configured.
+;oauth_auto_login = false
+
+# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
+;oauth_state_cookie_max_age = 600
+
+# limit of api_key seconds to live before expiration
+;api_key_max_seconds_to_live = -1
+
+#################################### Anonymous Auth ######################
+[auth.anonymous]
+# enable anonymous access
+enabled = true
+
+# specify organization name that should be used for unauthenticated users
+org_name = Main Org.
+
+# specify role for unauthenticated users
+org_role = Viewer
+
+# mask the Grafana version number for unauthenticated users
+hide_version = true
+
+#################################### Basic Auth ##########################
+[auth.basic]
+;enabled = true
+
+#################################### SMTP / Emailing ##########################
+[smtp]
+;enabled = false
+;host = localhost:25
+;user =
+# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+;password =
+;cert_file =
+;key_file =
+;skip_verify = false
+;from_address = admin@grafana.localhost
+;from_name = Grafana
+# EHLO identity in SMTP dialog (defaults to instance_name)
+;ehlo_identity = dashboard.example.com
+# SMTP startTLS policy (defaults to 'OpportunisticStartTLS')
+;startTLS_policy = NoStartTLS
+
+[emails]
+;welcome_email_on_sign_up = false
+;templates_pattern = emails/*.html
+
+#################################### Logging ##########################
+[log]
+# Either "console", "file", "syslog". Default is console and  file
+# Use space to separate multiple modes, e.g. "console file"
+;mode = console file
+
+# Either "debug", "info", "warn", "error", "critical", default is "info"
+;level = info
+
+# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
+;filters =
+
+# For "console" mode only
+[log.console]
+;level =
+
+# log line format, valid options are text, console and json
+;format = console
+
+# For "file" mode only
+[log.file]
+;level =
+
+# log line format, valid options are text, console and json
+;format = text
+
+# This enables automated log rotate(switch of following options), default is true
+;log_rotate = true
+
+# Max line number of single file, default is 1000000
+;max_lines = 1000000
+
+# Max size shift of single file, default is 28 means 1 << 28, 256MB
+;max_size_shift = 28
+
+# Segment log daily, default is true
+;daily_rotate = true
+
+# Expired days of log file(delete after max days), default is 7
+;max_days = 7
+
+[log.syslog]
+;level =
+
+# log line format, valid options are text, console and json
+;format = text
+
+# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+;network =
+;address =
+
+# Syslog facility. user, daemon and local0 through local7 are valid.
+;facility =
+
+# Syslog tag. By default, the process' argv[0] is used.
+;tag =
+
+#################################### Usage Quotas ########################
+[quota]
+; enabled = false
+
+#### set quotas to -1 to make unlimited. ####
+# limit number of users per Org.
+; org_user = 10
+
+# limit number of dashboards per Org.
+; org_dashboard = 100
+
+# limit number of data_sources per Org.
+; org_data_source = 10
+
+# limit number of api_keys per Org.
+; org_api_key = 10
+
+# limit number of orgs a user can create.
+; user_org = 10
+
+# Global limit of users.
+; global_user = -1
+
+# global limit of orgs.
+; global_org = -1
+
+# global limit of dashboards
+; global_dashboard = -1
+
+# global limit of api_keys
+; global_api_key = -1
+
+# global limit on number of logged in users.
+; global_session = -1
+
+#################################### Alerting ############################
+[alerting]
+# Disable alerting engine & UI features
+enabled = false
+
+
+#################################### Explore #############################
+[explore]
+# Enable the Explore section
+enabled = false
+
+#################################### Internal Grafana Metrics ##########################
+# Metrics available at HTTP API Url /metrics
+[metrics]
+enabled           = false
+
+#################################### Distributed tracing ############
+[tracing.jaeger]
+# Enable by setting the address sending traces to jaeger (ex localhost:6831)
+;address = localhost:6831
+# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
+;always_included_tag = tag1:value1
+# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+;sampler_type = const
+# jaeger samplerconfig param
+# for "const" sampler, 0 or 1 for always false/true respectively
+# for "probabilistic" sampler, a probability between 0 and 1
+# for "rateLimiting" sampler, the number of spans per second
+# for "remote" sampler, param is the same as for "probabilistic"
+# and indicates the initial sampling rate before the actual one
+# is received from the mothership
+;sampler_param = 1
+# Whether or not to use Zipkin propagation (x-b3- HTTP headers).
+;zipkin_propagation = false
+# Setting this to true disables shared RPC spans.
+# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
+;disable_shared_zipkin_spans = false
+
+#################################### External image storage ##########################
+[external_image_storage]
+# Used for uploading images to public servers so they can be included in slack/email messages.
+# you can choose between (s3, webdav, gcs, azure_blob, local)
+;provider =
+
+[external_image_storage.s3]
+;endpoint =
+;path_style_access =
+;bucket =
+;region =
+;path =
+;access_key =
+;secret_key =
+
+[external_image_storage.webdav]
+;url =
+;public_url =
+;username =
+;password =
+
+[external_image_storage.gcs]
+;key_file =
+;bucket =
+;path =
+
+[external_image_storage.azure_blob]
+;account_name =
+;account_key =
+;container_name =
+
+[external_image_storage.local]
+# does not require any configuration
+
+[rendering]
+# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer.
+# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable Grafana to render panels and dashboards to PNG-images using HTTP requests to an external service.
+;server_url =
+# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/.
+;callback_url =
+# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
+# which this setting can help protect against by only allowing a certain amount of concurrent requests.
+;concurrent_render_request_limit = 30
+
+[panels]
+# If set to true Grafana will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities.
+;disable_sanitize_html = false
+
+[plugins]
+;enable_alpha = false
+;app_tls_skip_verify_insecure = false
+# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
+;allow_loading_unsigned_plugins =
+
+#################################### Grafana Image Renderer Plugin ##########################
+[plugin.grafana-image-renderer]
+# Instruct headless browser instance to use a default timezone when not provided by Grafana, e.g. when rendering panel image of alert.
+# See ICU’s metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
+# timezone IDs. Fallbacks to TZ environment variable if not set.
+;rendering_timezone =
+
+# Instruct headless browser instance to use a default language when not provided by Grafana, e.g. when rendering panel image of alert.
+# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
+;rendering_language =
+
+# Instruct headless browser instance to use a default device scale factor when not provided by Grafana, e.g. when rendering panel image of alert.
+# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
+;rendering_viewport_device_scale_factor =
+
+# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
+# the security risk it's not recommended to ignore HTTPS errors.
+;rendering_ignore_https_errors =
+
+# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
+# only capture and log error messages. When enabled, debug messages are captured and logged as well.
+# For the verbose information to be included in the Grafana server log you have to adjust the rendering log level to debug, configure
+# [log].filter = rendering:debug.
+;rendering_verbose_logging =
+
+# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
+# Default is false. This can be useful to enable (true) when troubleshooting.
+;rendering_dumpio =
+
+# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
+# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments is separated with comma-character.
+;rendering_args =
+
+# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
+# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
+# compatible with the plugin.
+;rendering_chrome_bin =
+
+# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
+# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
+# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
+;rendering_mode =
+
+# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
+# and will cluster using browser instances.
+# Mode 'context' will cluster using incognito pages.
+;rendering_clustering_mode =
+# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently..
+;rendering_clustering_max_concurrency =
+
+# Limit the maximum viewport width, height and device scale factor that can be requested.
+;rendering_viewport_max_width =
+;rendering_viewport_max_height =
+;rendering_viewport_max_device_scale_factor =
+
+# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
+# a port not in use.
+;grpc_host =
+;grpc_port =
+
+[enterprise]
+# Path to a valid Grafana Enterprise license.jwt file
+;license_path =
+
+[feature_toggles]
+# enable features, separated by spaces
+;enable =
+
+[date_formats]
+# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
+
+# Default system date format used in time range picker and other places where full time is displayed
+;full_date = YYYY-MM-DD HH:mm:ss
+
+# Used by graph and other places where we only show small intervals
+;interval_second = HH:mm:ss
+;interval_minute = HH:mm
+;interval_hour = MM/DD HH:mm
+;interval_day = MM/DD
+;interval_month = YYYY-MM
+;interval_year = YYYY
+
+# Experimental feature
+;use_browser_locale = false
+
+# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
+;default_timezone = browser
diff --git a/alpine/config-files/grafana/provisioning/dashboards/FritzBox.json b/config-files/grafana/provisioning/dashboards/FritzBox.json
diff --git a/alpine/config-files/grafana/provisioning/dashboards/dashboards.yml b/config-files/grafana/provisioning/dashboards/dashboards.yml
diff --git a/alpine/config-files/grafana/provisioning/dashboards/node-exporter.json b/config-files/grafana/provisioning/dashboards/home.json
diff --git a/alpine/config-files/grafana/provisioning/dashboards/node-exporter.json b/config-files/grafana/provisioning/dashboards/node-exporter.json
diff --git a/config-files/grafana/provisioning/dashboards/parkplaetze-kiel.jsom b/config-files/grafana/provisioning/dashboards/parkplaetze-kiel.jsom
@@ -0,0 +1,171 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "-- Grafana --",
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": 6,
+  "iteration": 1609020923317,
+  "links": [],
+  "panels": [
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "Prometheus",
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 0
+      },
+      "id": 2,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 2,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "parkplaetze_frei{name=\"$name\"}",
+          "format": "time_series",
+          "legendFormat": "Frei",
+          "refId": "A"
+        },
+        {
+          "expr": "parkplaetze_belegt{name=\"$name\"}",
+          "format": "time_series",
+          "legendFormat": "Belegt",
+          "refId": "B"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Parkplatz",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    }
+  ],
+  "schemaVersion": 20,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": [
+      {
+        "allValue": null,
+        "current": {
+          "selected": true,
+          "text": "ZOB - Bahnhof",
+          "value": "ZOB - Bahnhof"
+        },
+        "datasource": "Prometheus",
+        "definition": "label_values(parkplaetze_belegt, name)",
+        "hide": 0,
+        "includeAll": false,
+        "label": null,
+        "multi": false,
+        "name": "name",
+        "options": [],
+        "query": "label_values(parkplaetze_belegt, name)",
+        "refresh": 1,
+        "regex": "",
+        "skipUrlSync": false,
+        "sort": 0,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      }
+    ]
+  },
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ]
+  },
+  "timezone": "",
+  "title": "New dashboard Copy",
+  "uid": "WvwCv-bMk",
+  "version": 5
+}
diff --git a/alpine/config-files/grafana/provisioning/datasources/datasources.yml b/config-files/grafana/provisioning/datasources/datasources.yml
diff --git a/alpine/config-files/pleroma/config.exs b/config-files/pleroma/config.exs
diff --git a/alpine/config-files/radicale/config b/config-files/radicale/config
diff --git a/alpine/config-files/riot-web/config.json b/config-files/riot-web/config.json
diff --git a/alpine/config-files/synapse/homeserver.yaml b/config-files/synapse/homeserver.yaml
diff --git a/alpine/config-files/synapse/log.yaml b/config-files/synapse/log.yaml
diff --git a/configuration/joguhrtbecher.yml b/configuration/joguhrtbecher.yml
@@ -0,0 +1,33 @@
+system:
+  hostname: joguhrtbecher
+  domain: ctu.cx
+  timezone: Europe/Berlin
+  enableOwnRepos: true
+  enableSSH: true
+  enableSudo: true
+  useNTP: true
+  nameservers:
+    - 1.1.1.1
+    - 8.8.8.8
+  users:
+    - name: leah
+      groups: "wheel"
+      password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
+      sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
+
+services:
+  prometheus_node_exporter:
+    enable: true
+
+  syncthing:
+    enable: true
+    user: leah
+
+  nginx:
+    enable: true
+    vhosts:
+      taurus.ctu.cx: # FIXME(review): host is joguhrtbecher — vhost name looks copy-pasted from taurus.yml; expected joguhrtbecher.ctu.cx?
+        defaultServer: true
+        locations:
+          - path: /node-exporter
+            proxy: http://127.0.0.1:9100
diff --git a/configuration/taurus.yml b/configuration/taurus.yml
@@ -0,0 +1,157 @@
+system:
+  hostname: taurus
+  domain: ctu.cx
+  timezone: Europe/Berlin
+  alpineVersion: edge
+  enableOwnRepos: true
+  enableSSH: true
+  enableSudo: true
+  useNTP: true
+  nameservers:
+    - 1.1.1.1
+    - 8.8.8.8
+  users:
+    - name: leah
+      groups: "wheel"
+      password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
+      sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
+
+network:
+  awall:
+    enable: true
+    config:
+      zones:
+        WAN:
+          - iface: eth0
+      policies:
+        - in: _fw
+          action: accept
+        - in: _fw
+          out:  WAN
+          action: accept
+        - in: WAN
+          action: drop
+      filters:
+        - in: _fw
+          out: WAN
+          service:
+            - dns
+            - http
+            - https
+            - ssh
+        - in: WAN
+          out: _fw
+          service: 
+            - ping
+          action: accept
+  interfaces:
+    - name: lo
+      loopback: true
+    - name: eth0
+      ipv4:
+        address: 37.221.196.131
+        gateway: 37.221.196.1
+        netmask: 255.255.255.0
+      ipv6:
+        address: 2a03:4000:9:f8::1
+        gateway: fe80::1
+        netmask: 64
+
+services:
+  prometheus_node_exporter:
+    enable: true
+
+  bind:
+    enable: true
+    zonesRepo: https://cgit.ctu.cx/dns-zones
+    serveDomains:
+      - ctu.cx
+      - ctucx.de
+      - thein.ovh
+      - antifa.jetzt
+      - oeffisear.ch
+
+  acme_redirect:
+    enable: true
+    email: lets-encrypt@ctu.cx
+    acme_url: https://api.buypass.com/acme/directory
+    certs:
+      taurus.ctu.cx:
+        dns_names: 
+          - taurus.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/taurus.ctu.cx
+          - sudo rc-service nginx restart
+      syncthing.ctu.cx:
+        dns_names: 
+          - syncthing.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/syncthing.ctu.cx
+          - sudo rc-service nginx restart
+      restic.ctu.cx:
+        dns_names: 
+          - restic.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/restic.ctu.cx
+          - sudo rc-service nginx restart
+      photos.ctu.cx:
+        dns_names: 
+          - photos.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/photos.ctu.cx
+          - sudo rc-service nginx restart
+
+  nginx:
+    enable: true
+    sslOnly: true
+    vhosts:
+      taurus.ctu.cx:
+        defaultServer: true
+        ssl:
+          enable: true
+          cert: "/var/lib/acme-redirect/live/taurus.ctu.cx/fullchain"
+          privkey: "/var/lib/acme-redirect/live/taurus.ctu.cx/privkey"
+        locations:
+          - path: /node-exporter
+            proxy: http://127.0.0.1:9100
+      photos.ctu.cx:
+        ssl:
+          enable: true
+          cert: "/var/lib/acme-redirect/live/photos.ctu.cx/fullchain"
+          privkey: "/var/lib/acme-redirect/live/photos.ctu.cx/privkey"
+        root: /var/lib/websites/photos.ctu.cx
+        locations:
+          - path: '~* \.(html)$'
+            extraConfig: "
+              add_header Last-Modified $date_gmt;
+              add_header Cache-Control 'private, no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
+              if_modified_since off;
+              expires off;
+              etag off;
+            "
+
+  syncthing:
+    enable: true
+    user: leah
+    nginx:
+      enable: true
+      domain: "syncthing.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/syncthing.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/syncthing.ctu.cx/privkey"
+
+  rest_server:
+    enable: true
+    port: 8060
+    user: leah
+    nginx:
+      enable: true
+      domain: "restic.ctu.cx"
+      password: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/taurus/rest-server.htpasswd returnall=true') }}"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/restic.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/restic.ctu.cx/privkey"
diff --git a/configuration/wanderduene.yml b/configuration/wanderduene.yml
@@ -0,0 +1,413 @@
+system:
+  hostname: wanderduene
+  domain: ctu.cx
+  timezone: Europe/Berlin
+  alpineVersion: edge
+  enableOwnRepos: true
+  enableSSH: true
+  enableSudo: true
+  useNTP: true  # TODO: support archlinux
+  nameservers:
+    - 1.1.1.1
+    - 8.8.8.8
+  users:
+    - name: leah
+      groups: "wheel"
+      password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
+      sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
+
+network:
+  awall:
+    enable: true
+    config:
+      zones:
+        WAN:
+          - iface: eth0
+      policies:
+        - in: _fw
+          action: accept
+        - in: _fw
+          out:  WAN
+          action: accept
+        - in: WAN
+          action: drop
+      filters:
+        - in: _fw
+          out: WAN
+          service:
+            - dns
+            - http
+            - https
+            - ssh
+        - in: WAN
+          out: _fw
+          service:
+            - ping
+          action: accept
+  interfaces:
+    - name: lo
+      loopback: true
+    - name: eth0
+      ipv4:
+        address: 46.38.253.139
+        gateway: 46.38.253.1
+        netmask: 255.255.255.0
+      ipv6:
+        address: 2a03:4000:1:45d::1
+        gateway: fe80::1
+        netmask: 64
+
+services:
+  prometheus_node_exporter:
+    enable: true
+
+  postgresql:
+    enable: true
+
+  bind:
+    enable: true
+    zonesRepo: https://cgit.ctu.cx/dns-zones
+    serveDomains:
+      - ctu.cx
+      - ctucx.de
+      - thein.ovh
+      - antifa.jetzt
+      - oeffisear.ch
+
+  acme_redirect:
+    enable: true
+    email: lets-encrypt@ctu.cx
+    acme_url: https://api.buypass.com/acme/directory
+    certs:
+      wanderduene.ctu.cx:
+        dns_names: 
+          - wanderduene.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/wanderduene.ctu.cx
+          - sudo rc-service nginx restart
+          - sudo rc-service maddy restart
+      ctucx.de:
+        dns_names:
+          - ctucx.de
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/ctucx.de
+          - sudo rc-service nginx restart
+      ctu.cx:
+        dns_names:
+          - ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/ctu.cx
+          - sudo rc-service nginx restart
+      matrix.ctu.cx:
+        dns_names:
+          - matrix.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/matrix.ctu.cx
+          - sudo rc-service nginx restart
+      dav.ctu.cx:
+        dns_names: 
+          - dav.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/dav.ctu.cx
+          - sudo rc-service nginx restart
+      cgit.ctu.cx:
+        dns_names:
+          - cgit.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/cgit.ctu.cx
+          - sudo rc-service nginx restart
+      prometheus.ctu.cx:
+        dns_names: 
+          - prometheus.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/prometheus.ctu.cx
+          - sudo rc-service nginx restart
+      grafana.ctu.cx:
+        dns_names:
+          - grafana.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/grafana.ctu.cx
+          - sudo rc-service nginx restart
+      pleroma.ctu.cx:
+        dns_names:
+          - pleroma.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/pleroma.ctu.cx
+          - sudo rc-service nginx restart
+      frp.ctu.cx:
+        dns_names:
+          - frp.ctu.cx
+          - stasicontainer-mac.frp.ctu.cx
+          - stasicontainer.frp.ctu.cx
+          - coladose.frp.ctu.cx
+          - toaster.frp.ctu.cx
+          - joghurtbecher.frp.ctu.cx
+          - isa.frp.ctu.cx
+          - isa-mac.frp.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/frp.ctu.cx
+          - sudo rc-service nginx restart
+      oeffi.ctu.cx:
+        dns_names:
+          - oeffi.ctu.cx
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/oeffi.ctu.cx
+          - sudo rc-service nginx restart
+      repo.f2k1.de:
+        dns_names:
+          - repo.f2k1.de
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/repo.f2k1.de
+          - sudo rc-service nginx restart
+      oeffisear.ch:
+        dns_names:
+          - oeffisear.ch
+        renew_tasks:
+          - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/oeffisear.ch
+          - sudo rc-service nginx restart
+
+  nginx:
+    enable: true
+    sslOnly: true
+    vhosts:
+      wanderduene.ctu.cx:
+        defaultServer: true
+        ssl:
+          enable: true
+          cert: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/fullchain"
+          privkey: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/privkey"
+        locations:
+          - path: /node-exporter
+            proxy: http://127.0.0.1:9100/metrics
+      ctu.cx:
+        ssl:
+          enable: true
+          cert: "/var/lib/acme-redirect/live/ctu.cx/fullchain"
+          privkey: "/var/lib/acme-redirect/live/ctu.cx/privkey"
+        root: /var/lib/websites/ctu.cx
+        locations:
+          - path: "/.well-known/host-meta"
+            extraConfig: "return 301 https://pleroma.ctu.cx$request_uri;"
+          - path: "/.well-known/matrix/client"
+            extraConfig: '
+              add_header Content-Type application/json;
+              return 200 "{\"m.homeserver\": {\"base_url\": \"https://matrix.ctu.cx\"}}";
+            '
+          - path: "/.well-known/matrix/server"
+            extraConfig: '
+              add_header Content-Type application/json;
+              return 200 "{\"m.server\": \"matrix.ctu.cx:443\"}";
+            '
+          - path: "/vodafone-map"
+            extraConfig: '
+              proxy_set_header Accept-Encoding "";
+              proxy_pass https://netmap.vodafone.de/arcgis/rest/services/CoKart/netzabdeckung_mobilfunk_4x/MapServer;
+            '
+          - path: "/magenta-at-map"
+            extraConfig: '
+              proxy_set_header Accept-Encoding "";
+              proxy_pass https://app.wigeogis.com/kunden/tmobile/data/geoserver.php;
+            '
+          - path: "/drei-at-data"
+            extraConfig: '
+              proxy_set_header Accept-Encoding "";
+              proxy_pass https://www.drei.at/media/common/netzabdeckung;
+              proxy_hide_header "access-control-allow-origin";
+              add_header "access-control-allow-origin" "*";
+            '
+          - path: "/nuc8rugged"
+            extraConfig: '
+              autoindex on;
+              autoindex_exact_size off;
+            '
+          - path: "/drucken"
+            extraConfig: '
+              autoindex on;
+              autoindex_exact_size off;
+              auth_basic "Restricted Content";
+              auth_basic_user_file /etc/nginx/passwd/print;
+            '
+      repo.f2k1.de:
+        ssl:
+          enable: true
+          cert: "/var/lib/acme-redirect/live/repo.f2k1.de/fullchain"
+          privkey: "/var/lib/acme-redirect/live/repo.f2k1.de/privkey"
+        locations:
+          - path: /
+            proxy: http://127.0.0.1:8088
+
+  gitolite:
+    enable: true
+    initialKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829"
+
+
+  cgit:
+    enable: true
+    configFile: config-files/cgit/cgitrc
+    customCssFile: config-files/cgit/cgit.css
+    nginx:
+      enable: true
+      domain: "cgit.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/cgit.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/cgit.ctu.cx/privkey"
+
+  oeffisearch:
+    enable: true
+    instances: 4  # currently not used and always 4
+    nginx:
+      enable: true
+      domain: "oeffisear.ch"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/oeffisear.ch/fullchain"
+        privkey: "/var/lib/acme-redirect/live/oeffisear.ch/privkey"
+
+  oeffi_web:
+    enable: true
+    instances: 4  # currently not used and always 4
+    nginx:
+      enable: true
+      domain: "oeffi.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/oeffi.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/oeffi.ctu.cx/privkey"
+
+  maddy:
+    enable: true
+    hostname: "wanderduene.ctu.cx"
+    ssl_cert: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/fullchain"
+    ssl_privkey: "/var/lib/acme-redirect/live/wanderduene.ctu.cx/privkey"
+
+  radicale:
+    enable: true
+    configFile: config-files/radicale/config
+    users: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/wanderduene/radicale.users returnall=true')}}"
+    nginx:
+      enable: true
+      domain: "dav.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/dav.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/dav.ctu.cx/privkey"
+
+  synapse:
+    enable: true
+    configPath: config-files/synapse
+    webClient:
+      enable: true
+      configFile: config-files/riot-web/config.json
+    nginx:
+      enable: true
+      domain: "matrix.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/matrix.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/matrix.ctu.cx/privkey"
+
+  prometheus:
+    enable: true
+    nginx:
+      enable: true
+      domain: "prometheus.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/prometheus.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/prometheus.ctu.cx/privkey"
+    config:
+      global:
+        scrape_interval: 20s
+        evaluation_interval: 1m
+      scrape_configs:
+        - job_name: 'prometheus'
+          static_configs:
+          - targets: ['localhost:9090']
+
+        - job_name: 'node-exporter'
+          metrics_path: '/node-exporter'
+          scheme: 'https'
+          scrape_interval: 30s
+          static_configs:
+          - targets: [
+            'wanderduene.ctu.cx',
+            'taurus.ctu.cx',
+            'lollo.ctu.cx',
+            'repo.f2k1.de',
+            'toaster.frp.ctu.cx',
+            'stasicontainer-mac.frp.ctu.cx'
+          ]
+
+        - job_name: 'fritzbox-exporter'
+          metrics_path: '/metrics'
+          scheme: 'https'
+          scrape_interval: 30s
+          static_configs:
+          - targets: [
+            'fbexporter.ctu.cx',
+            'fbexporter.f2k1.de'
+          ]
+
+        - job_name: 'parkplatz-exporter'
+          metrics_path: '/parkplaetze.php'
+          scheme: 'https'
+          scrape_interval: 5m
+          static_configs:
+          - targets: [
+            'f2k1.de'
+          ]
+
+  grafana:
+    enable: true
+    configFile: config-files/grafana/grafana.ini
+    provisioningPath: config-files/grafana/provisioning
+    nginx:
+      enable: true
+      domain: "grafana.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/grafana.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/grafana.ctu.cx/privkey"
+
+  pleroma:
+    enable: true
+    configFile: config-files/pleroma/config.exs
+    secretsContent: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/wanderduene/pleroma.secrets returnall=true')}}"
+    nginx:
+      enable: true
+      domain: "pleroma.ctu.cx"
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/pleroma.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/pleroma.ctu.cx/privkey"
+
+  frps:
+    enable: true
+    token: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/wanderduene/frps/token returnall=true')}}"
+    port: 5050
+    vhostDomain: "frp.ctu.cx"
+    vhostPort: 8088
+    nginx:
+      enable: true
+      sslOnly: true
+      ssl:
+        enable: true
+        cert: "/var/lib/acme-redirect/live/frp.ctu.cx/fullchain"
+        privkey: "/var/lib/acme-redirect/live/frp.ctu.cx/privkey"
+      vhosts:
+        - stasicontainer-mac
+        - stasicontainer
+        - coladose
+        - toaster
+        - isa
+        - isa-mac
+        - joghurtbecher
diff --git a/inventory b/inventory
@@ -0,0 +1,12 @@
+[all:vars]
+ansible_ssh_user=root
+
+[taurus]
+taurus.ctu.cx
+
+
+[wanderduene]
+wanderduene.ctu.cx
+
+[lollo]
+10.0.0.1
\ No newline at end of file
diff --git a/lookup_plugins/diskcache.py b/lookup_plugins/diskcache.py
@@ -0,0 +1,2437 @@
+"""
+An Ansible lookup plugin that caches the results of any other lookup, most
+useful in group/host vars.
+
+By default, Ansible evaluates any lookups in a group/host var whenever the var
+is accessed. For example, given a group/host var:
+
+.. code-block:: yaml
+
+    content: "{{ lookup('pipe', 'a-very-slow-command') }}"
+
+any tasks that access ``content`` (e.g. in a template) will re-evaluate
+the lookup, which adds up very quickly.
+
+.. seealso:: :attr:`.DOCUMENTATION`, :attr:`.EXAMPLES`, `ansible/ansible#9623
+    <https://github.com/ansible/ansible/issues/9623>`_
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os.path
+import codecs
+import contextlib as cl
+import errno
+import functools as ft
+import io
+import json
+import os
+import os.path as op
+import pickle
+import pickletools
+import sqlite3
+import struct
+import tempfile
+import threading
+import time
+import warnings
+import zlib
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.loader import lookup_loader
+from ansible.plugins.lookup import LookupBase
+
+__version__ = "1.0.0"
+
+
+DOCUMENTATION = """
+lookup: diskcache
+short_description: cache the result of a lookup
+description:
+  - Run a lookup and cache the result for the duration of the play. This is
+    most useful for lookups in group/host vars, which are typically
+    re-evaluated every time they are used
+requirements:
+  - diskcache U(https://pypi.org/project/diskcache/)
+options:
+  _terms:
+    description: the lookup and any arguments
+    required: True
+notes:
+  - Results are cached in C(DEFAULT_LOCAL_TMP) and will be deleted at the end of
+    the play.
+"""
+
+EXAMPLES = """
+group_var1: "{{ lookup('diskcache', 'pipe', 'a-very-slow-command') }}"
+"""
+
+
+try:
+    from __main__ import display
+except ImportError:
+    from ansible.utils.display import Display
+
+    display = Display()
+
+
+class Constant(tuple):
+    "Pretty display of immutable constant."
+    def __new__(cls, name):
+        return tuple.__new__(cls, (name,))
+
+    def __repr__(self):
+        return '%s' % self[0]
+
+DBNAME = 'cache.db'
+ENOVAL = Constant('ENOVAL')
+UNKNOWN = Constant('UNKNOWN')
+
+MODE_NONE = 0
+MODE_RAW = 1
+MODE_BINARY = 2
+MODE_TEXT = 3
+MODE_PICKLE = 4
+
+DEFAULT_SETTINGS = {
+    u'statistics': 0,  # False
+    u'tag_index': 0,   # False
+    u'eviction_policy': u'least-recently-stored',
+    u'size_limit': 2 ** 30,  # 1gb
+    u'cull_limit': 10,
+    u'sqlite_auto_vacuum': 1,        # FULL
+    u'sqlite_cache_size': 2 ** 13,   # 8,192 pages
+    u'sqlite_journal_mode': u'wal',
+    u'sqlite_mmap_size': 2 ** 26,    # 64mb
+    u'sqlite_synchronous': 1,        # NORMAL
+    u'disk_min_file_size': 2 ** 15,  # 32kb
+    u'disk_pickle_protocol': pickle.HIGHEST_PROTOCOL,
+}
+
+METADATA = {
+    u'count': 0,
+    u'size': 0,
+    u'hits': 0,
+    u'misses': 0,
+}
+
+EVICTION_POLICY = {
+    'none': {
+        'init': None,
+        'get': None,
+        'cull': None,
+    },
+    'least-recently-stored': {
+        'init': (
+            'CREATE INDEX IF NOT EXISTS Cache_store_time ON'
+            ' Cache (store_time)'
+        ),
+        'get': None,
+        'cull': 'SELECT {fields} FROM Cache ORDER BY store_time LIMIT ?',
+    },
+    'least-recently-used': {
+        'init': (
+            'CREATE INDEX IF NOT EXISTS Cache_access_time ON'
+            ' Cache (access_time)'
+        ),
+        'get': 'access_time = {now}',
+        'cull': 'SELECT {fields} FROM Cache ORDER BY access_time LIMIT ?',
+    },
+    'least-frequently-used': {
+        'init': (
+            'CREATE INDEX IF NOT EXISTS Cache_access_count ON'
+            ' Cache (access_count)'
+        ),
+        'get': 'access_count = access_count + 1',
+        'cull': 'SELECT {fields} FROM Cache ORDER BY access_count LIMIT ?',
+    },
+}
+
+class Disk(object):
+    "Cache key and value serialization for SQLite database and files."
+    def __init__(self, directory, min_file_size=0, pickle_protocol=0):
+        """Initialize disk instance.
+
+        :param str directory: directory path
+        :param int min_file_size: minimum size for file use
+        :param int pickle_protocol: pickle protocol for serialization
+
+        """
+        self._directory = directory
+        self.min_file_size = min_file_size
+        self.pickle_protocol = pickle_protocol
+
+
+    def hash(self, key):
+        """Compute portable hash for `key`.
+
+        :param key: key to hash
+        :return: hash value
+
+        """
+        mask = 0xFFFFFFFF
+        disk_key, _ = self.put(key)
+        type_disk_key = type(disk_key)
+
+        if type_disk_key is sqlite3.Binary:
+            return zlib.adler32(disk_key) & mask
+        elif type_disk_key is str:
+            return zlib.adler32(disk_key.encode('utf-8')) & mask  # noqa
+        elif type_disk_key is int:
+            return disk_key % mask
+        else:
+            assert type_disk_key is float
+            return zlib.adler32(struct.pack('!d', disk_key)) & mask
+
+
+    def put(self, key):
+        """Convert `key` to fields key and raw for Cache table.
+
+        :param key: key to convert
+        :return: (database key, raw boolean) pair
+
+        """
+        # pylint: disable=unidiomatic-typecheck
+        type_key = type(key)
+
+        if type_key is bytes:
+            return sqlite3.Binary(key), True
+        elif ((type_key is str)
+                or (type_key is int
+                    and -9223372036854775808 <= key <= 9223372036854775807)
+                or (type_key is float)):
+            return key, True
+        else:
+            data = pickle.dumps(key, protocol=self.pickle_protocol)
+            result = pickletools.optimize(data)
+            return sqlite3.Binary(result), False
+
+
+    def get(self, key, raw):
+        """Convert fields `key` and `raw` from Cache table to key.
+
+        :param key: database key to convert
+        :param bool raw: flag indicating raw database storage
+        :return: corresponding Python key
+
+        """
+        # pylint: disable=no-self-use,unidiomatic-typecheck
+        if raw:
+            return bytes(key) if type(key) is sqlite3.Binary else key
+        else:
+            return pickle.load(io.BytesIO(key))
+
+
+    def store(self, value, read, key=UNKNOWN):
+        """Convert `value` to fields size, mode, filename, and value for Cache
+        table.
+
+        :param value: value to convert
+        :param bool read: True when value is file-like object
+        :param key: key for item (default UNKNOWN)
+        :return: (size, mode, filename, value) tuple for Cache table
+
+        """
+        # pylint: disable=unidiomatic-typecheck
+        type_value = type(value)
+        min_file_size = self.min_file_size
+
+        if ((type_value is str and len(value) < min_file_size)
+                or (type_value is int
+                    and -9223372036854775808 <= value <= 9223372036854775807)
+                or (type_value is float)):
+            return 0, MODE_RAW, None, value
+        elif type_value is bytes:
+            if len(value) < min_file_size:
+                return 0, MODE_RAW, None, sqlite3.Binary(value)
+            else:
+                filename, full_path = self.filename(key, value)
+
+                with open(full_path, 'wb') as writer:
+                    writer.write(value)
+
+                return len(value), MODE_BINARY, filename, None
+        elif type_value is str:
+            filename, full_path = self.filename(key, value)
+
+            with open(full_path, 'w', encoding='UTF-8') as writer:
+                writer.write(value)
+
+            size = op.getsize(full_path)
+            return size, MODE_TEXT, filename, None
+        elif read:
+            size = 0
+            reader = ft.partial(value.read, 2 ** 22)
+            filename, full_path = self.filename(key, value)
+
+            with open(full_path, 'wb') as writer:
+                for chunk in iter(reader, b''):
+                    size += len(chunk)
+                    writer.write(chunk)
+
+            return size, MODE_BINARY, filename, None
+        else:
+            result = pickle.dumps(value, protocol=self.pickle_protocol)
+
+            if len(result) < min_file_size:
+                return 0, MODE_PICKLE, None, sqlite3.Binary(result)
+            else:
+                filename, full_path = self.filename(key, value)
+
+                with open(full_path, 'wb') as writer:
+                    writer.write(result)
+
+                return len(result), MODE_PICKLE, filename, None
+
+
+    def fetch(self, mode, filename, value, read):
+        """Convert fields `mode`, `filename`, and `value` from Cache table to
+        value.
+
+        :param int mode: value mode raw, binary, text, or pickle
+        :param str filename: filename of corresponding value
+        :param value: database value
+        :param bool read: when True, return an open file handle
+        :return: corresponding Python value
+
+        """
+        # pylint: disable=no-self-use,unidiomatic-typecheck
+        if mode == MODE_RAW:
+            return bytes(value) if type(value) is sqlite3.Binary else value
+        elif mode == MODE_BINARY:
+            if read:
+                return open(op.join(self._directory, filename), 'rb')
+            else:
+                with open(op.join(self._directory, filename), 'rb') as reader:
+                    return reader.read()
+        elif mode == MODE_TEXT:
+            full_path = op.join(self._directory, filename)
+            with open(full_path, 'r', encoding='UTF-8') as reader:
+                return reader.read()
+        elif mode == MODE_PICKLE:
+            if value is None:
+                with open(op.join(self._directory, filename), 'rb') as reader:
+                    return pickle.load(reader)
+            else:
+                return pickle.load(io.BytesIO(value))
+
+
+    def filename(self, key=UNKNOWN, value=UNKNOWN):
+        """Return filename and full-path tuple for file storage.
+
+        Filename will be a randomly generated 28 character hexadecimal string
+        with ".val" suffixed. Two levels of sub-directories will be used to
+        reduce the size of directories. On older filesystems, lookups in
+        directories with many files may be slow.
+
+        The default implementation ignores the `key` and `value` parameters.
+
+        In some scenarios, for example :meth:`Cache.push
+        <diskcache.Cache.push>`, the `key` or `value` may not be known when the
+        item is stored in the cache.
+
+        :param key: key for item (default UNKNOWN)
+        :param value: value for item (default UNKNOWN)
+
+        """
+        # pylint: disable=unused-argument
+        hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
+        sub_dir = op.join(hex_name[:2], hex_name[2:4])
+        name = hex_name[4:] + '.val'
+        directory = op.join(self._directory, sub_dir)
+
+        try:
+            os.makedirs(directory)
+        except OSError as error:
+            if error.errno != errno.EEXIST:
+                raise
+
+        filename = op.join(sub_dir, name)
+        full_path = op.join(self._directory, filename)
+        return filename, full_path
+
+
+    def remove(self, filename):
+        """Remove a file given by `filename`.
+
+        This method is cross-thread and cross-process safe. If an "error no
+        entry" occurs, it is suppressed.
+
+        :param str filename: relative path to file
+
+        """
+        full_path = op.join(self._directory, filename)
+
+        try:
+            os.remove(full_path)
+        except WindowsError:
+            pass
+        except OSError as error:
+            if error.errno != errno.ENOENT:
+                # ENOENT may occur if two caches attempt to delete the same
+                # file at the same time.
+                raise
+
+class Cache(object):
+    "Disk and file backed cache."
+    def __init__(self, directory=None, timeout=60, disk=Disk, **settings):
+        """Initialize cache instance.
+
+        :param str directory: cache directory
+        :param float timeout: SQLite connection timeout
+        :param disk: Disk type or subclass for serialization
+        :param settings: any of DEFAULT_SETTINGS
+
+        """
+        try:
+            assert issubclass(disk, Disk)
+        except (TypeError, AssertionError):
+            raise ValueError('disk must subclass diskcache.Disk') from None
+
+        if directory is None:
+            directory = tempfile.mkdtemp(prefix='diskcache-')
+        directory = op.expanduser(directory)
+        directory = op.expandvars(directory)
+
+        self._directory = directory
+        self._timeout = 0  # Manually handle retries during initialization.
+        self._local = threading.local()
+        self._txn_id = None
+
+        if not op.isdir(directory):
+            try:
+                os.makedirs(directory, 0o755)
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise EnvironmentError(
+                        error.errno,
+                        'Cache directory "%s" does not exist'
+                        ' and could not be created' % self._directory
+                    ) from None
+
+        sql = self._sql_retry
+
+        # Setup Settings table.
+
+        try:
+            current_settings = dict(sql(
+                'SELECT key, value FROM Settings'
+            ).fetchall())
+        except sqlite3.OperationalError:
+            current_settings = {}
+
+        sets = DEFAULT_SETTINGS.copy()
+        sets.update(current_settings)
+        sets.update(settings)
+
+        for key in METADATA:
+            sets.pop(key, None)
+
+        # Chance to set pragmas before any tables are created.
+
+        for key, value in sorted(sets.items()):
+            if key.startswith('sqlite_'):
+                self.reset(key, value, update=False)
+
+        sql('CREATE TABLE IF NOT EXISTS Settings ('
+            ' key TEXT NOT NULL UNIQUE,'
+            ' value)'
+        )
+
+        # Setup Disk object (must happen after settings initialized).
+
+        kwargs = {
+            key[5:]: value for key, value in sets.items()
+            if key.startswith('disk_')
+        }
+        self._disk = disk(directory, **kwargs)
+
+        # Set cached attributes: updates settings and sets pragmas.
+
+        for key, value in sets.items():
+            query = 'INSERT OR REPLACE INTO Settings VALUES (?, ?)'
+            sql(query, (key, value))
+            self.reset(key, value)
+
+        for key, value in METADATA.items():
+            query = 'INSERT OR IGNORE INTO Settings VALUES (?, ?)'
+            sql(query, (key, value))
+            self.reset(key)
+
+        (self._page_size,), = sql('PRAGMA page_size').fetchall()
+
+        # Setup Cache table.
+
+        sql('CREATE TABLE IF NOT EXISTS Cache ('
+            ' rowid INTEGER PRIMARY KEY,'
+            ' key BLOB,'
+            ' raw INTEGER,'
+            ' store_time REAL,'
+            ' expire_time REAL,'
+            ' access_time REAL,'
+            ' access_count INTEGER DEFAULT 0,'
+            ' tag BLOB,'
+            ' size INTEGER DEFAULT 0,'
+            ' mode INTEGER DEFAULT 0,'
+            ' filename TEXT,'
+            ' value BLOB)'
+        )
+
+        sql('CREATE UNIQUE INDEX IF NOT EXISTS Cache_key_raw ON'
+            ' Cache(key, raw)'
+        )
+
+        sql('CREATE INDEX IF NOT EXISTS Cache_expire_time ON'
+            ' Cache (expire_time)'
+        )
+
+        query = EVICTION_POLICY[self.eviction_policy]['init']
+
+        if query is not None:
+            sql(query)
+
+        # Use triggers to keep Metadata updated.
+
+        sql('CREATE TRIGGER IF NOT EXISTS Settings_count_insert'
+            ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
+            ' UPDATE Settings SET value = value + 1'
+            ' WHERE key = "count"; END'
+        )
+
+        sql('CREATE TRIGGER IF NOT EXISTS Settings_count_delete'
+            ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
+            ' UPDATE Settings SET value = value - 1'
+            ' WHERE key = "count"; END'
+        )
+
+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_insert'
+            ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
+            ' UPDATE Settings SET value = value + NEW.size'
+            ' WHERE key = "size"; END'
+        )
+
+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_update'
+            ' AFTER UPDATE ON Cache FOR EACH ROW BEGIN'
+            ' UPDATE Settings'
+            ' SET value = value + NEW.size - OLD.size'
+            ' WHERE key = "size"; END'
+        )
+
+        sql('CREATE TRIGGER IF NOT EXISTS Settings_size_delete'
+            ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
+            ' UPDATE Settings SET value = value - OLD.size'
+            ' WHERE key = "size"; END'
+        )
+
+        # Create tag index if requested.
+
+        if self.tag_index:  # pylint: disable=no-member
+            self.create_tag_index()
+        else:
+            self.drop_tag_index()
+
+        # Close and re-open database connection with given timeout.
+
+        self.close()
+        self._timeout = timeout
+        self._sql  # pylint: disable=pointless-statement
+
+
+    @property
+    def directory(self):
+        """Cache directory."""
+        return self._directory
+
+
+    @property
+    def timeout(self):
+        """SQLite connection timeout value in seconds."""
+        return self._timeout
+
+
+    @property
+    def disk(self):
+        """Disk used for serialization."""
+        return self._disk
+
+
+    @property
+    def _con(self):
+        # Check process ID to support process forking. If the process
+        # ID changes, close the connection and update the process ID.
+
+        local_pid = getattr(self._local, 'pid', None)
+        pid = os.getpid()
+
+        if local_pid != pid:
+            self.close()
+            self._local.pid = pid
+
+        con = getattr(self._local, 'con', None)
+
+        if con is None:
+            con = self._local.con = sqlite3.connect(
+                op.join(self._directory, DBNAME),
+                timeout=self._timeout,
+                isolation_level=None,
+            )
+
+            # Some SQLite pragmas work on a per-connection basis so
+            # query the Settings table and reset the pragmas. The
+            # Settings table may not exist so catch and ignore the
+            # OperationalError that may occur.
+
+            try:
+                select = 'SELECT key, value FROM Settings'
+                settings = con.execute(select).fetchall()
+            except sqlite3.OperationalError:
+                pass
+            else:
+                for key, value in settings:
+                    if key.startswith('sqlite_'):
+                        self.reset(key, value, update=False)
+
+        return con
+
+
+    @property
+    def _sql(self):
+        return self._con.execute
+
+
+    @property
+    def _sql_retry(self):
+        sql = self._sql
+
+        # 2018-11-01 GrantJ - Some SQLite builds/versions handle
+        # the SQLITE_BUSY return value and connection parameter
+        # "timeout" differently. For a more reliable duration,
+        # manually retry the statement for 60 seconds. Only used
+        # by statements which modify the database and do not use
+        # a transaction (like those in ``__init__`` or ``reset``).
+        # See Issue #85 for and tests/issue_85.py for more details.
+
+        def _execute_with_retry(statement, *args, **kwargs):
+            start = time.time()
+            while True:
+                try:
+                    return sql(statement, *args, **kwargs)
+                except sqlite3.OperationalError as exc:
+                    if str(exc) != 'database is locked':
+                        raise
+                    diff = time.time() - start
+                    if diff > 60:
+                        raise
+                    time.sleep(0.001)
+
+        return _execute_with_retry
+
+
+    @cl.contextmanager
+    def transact(self, retry=False):
+        """Context manager to perform a transaction by locking the cache.
+
+        While the cache is locked, no other write operation is permitted.
+        Transactions should therefore be as short as possible. Read and write
+        operations performed in a transaction are atomic. Read operations may
+        occur concurrent to a transaction.
+
+        Transactions may be nested and may not be shared between threads.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        >>> cache = Cache()
+        >>> with cache.transact():  # Atomically increment two keys.
+        ...     _ = cache.incr('total', 123.4)
+        ...     _ = cache.incr('count', 1)
+        >>> with cache.transact():  # Atomically calculate average.
+        ...     average = cache['total'] / cache['count']
+        >>> average
+        123.4
+
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: context manager for use in `with` statement
+        :raises Timeout: if database timeout occurs
+
+        """
+        with self._transact(retry=retry):
+            yield
+
+
+    @cl.contextmanager
+    def _transact(self, retry=False, filename=None):
+        sql = self._sql
+        filenames = []
+        _disk_remove = self._disk.remove
+        tid = threading.get_ident()
+        txn_id = self._txn_id
+
+        if tid == txn_id:
+            begin = False
+        else:
+            while True:
+                try:
+                    sql('BEGIN IMMEDIATE')
+                    begin = True
+                    self._txn_id = tid
+                    break
+                except sqlite3.OperationalError:
+                    if retry:
+                        continue
+                    if filename is not None:
+                        _disk_remove(filename)
+                    raise Timeout from None
+
+        try:
+            yield sql, filenames.append
+        except BaseException:
+            if begin:
+                assert self._txn_id == tid
+                self._txn_id = None
+                sql('ROLLBACK')
+            raise
+        else:
+            if begin:
+                assert self._txn_id == tid
+                self._txn_id = None
+                sql('COMMIT')
+            for name in filenames:
+                if name is not None:
+                    _disk_remove(name)
+
+
+    def set(self, key, value, expire=None, read=False, tag=None, retry=False):
+        """Set `key` and `value` item in cache.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param value: value for item
+        :param float expire: seconds until item expires
+            (default None, no expiry)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: True if item was set
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        expire_time = None if expire is None else now + expire
+        size, mode, filename, db_value = self._disk.store(value, read, key=key)
+        columns = (expire_time, tag, size, mode, filename, db_value)
+
+        # The order of SELECT, UPDATE, and INSERT is important below.
+        #
+        # Typical cache usage pattern is:
+        #
+        # value = cache.get(key)
+        # if value is None:
+        #     value = expensive_calculation()
+        #     cache.set(key, value)
+        #
+        # Cache.get does not evict expired keys to avoid writes during lookups.
+        # Commonly used/expired keys will therefore remain in the cache making
+        # an UPDATE the preferred path.
+        #
+        # The alternative is to assume the key is not present by first trying
+        # to INSERT and then handling the IntegrityError that occurs from
+        # violating the UNIQUE constraint. This optimistic approach was
+        # rejected based on the common cache usage pattern.
+        #
+        # INSERT OR REPLACE aka UPSERT is not used because the old filename may
+        # need cleanup.
+
+        with self._transact(retry, filename) as (sql, cleanup):
+            rows = sql(
+                'SELECT rowid, filename FROM Cache'
+                ' WHERE key = ? AND raw = ?',
+                (db_key, raw),
+            ).fetchall()
+
+            if rows:
+                (rowid, old_filename), = rows
+                cleanup(old_filename)
+                self._row_update(rowid, now, columns)
+            else:
+                self._row_insert(db_key, raw, now, columns)
+
+            self._cull(now, sql, cleanup)
+
+            return True
+
+
+    def __setitem__(self, key, value):
+        """Set corresponding `value` for `key` in cache.
+
+        :param key: key for item
+        :param value: value for item
+        :return: corresponding value
+        :raises KeyError: if key is not found
+
+        """
+        self.set(key, value, retry=True)
+
+
+    def _row_update(self, rowid, now, columns):
+        sql = self._sql
+        expire_time, tag, size, mode, filename, value = columns
+        sql('UPDATE Cache SET'
+            ' store_time = ?,'
+            ' expire_time = ?,'
+            ' access_time = ?,'
+            ' access_count = ?,'
+            ' tag = ?,'
+            ' size = ?,'
+            ' mode = ?,'
+            ' filename = ?,'
+            ' value = ?'
+            ' WHERE rowid = ?', (
+                now,          # store_time
+                expire_time,
+                now,          # access_time
+                0,            # access_count
+                tag,
+                size,
+                mode,
+                filename,
+                value,
+                rowid,
+            ),
+        )
+
+
+    def _row_insert(self, key, raw, now, columns):
+        sql = self._sql
+        expire_time, tag, size, mode, filename, value = columns
+        sql('INSERT INTO Cache('
+            ' key, raw, store_time, expire_time, access_time,'
+            ' access_count, tag, size, mode, filename, value'
+            ') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
+                key,
+                raw,
+                now,         # store_time
+                expire_time,
+                now,         # access_time
+                0,           # access_count
+                tag,
+                size,
+                mode,
+                filename,
+                value,
+            ),
+        )
+
+
+    def _cull(self, now, sql, cleanup, limit=None):
+        cull_limit = self.cull_limit if limit is None else limit
+
+        if cull_limit == 0:
+            return
+
+        # Evict expired keys.
+
+        select_expired_template = (
+            'SELECT %s FROM Cache'
+            ' WHERE expire_time IS NOT NULL AND expire_time < ?'
+            ' ORDER BY expire_time LIMIT ?'
+        )
+
+        select_expired = select_expired_template % 'filename'
+        rows = sql(select_expired, (now, cull_limit)).fetchall()
+
+        if rows:
+            delete_expired = (
+                'DELETE FROM Cache WHERE rowid IN (%s)'
+                % (select_expired_template % 'rowid')
+            )
+            sql(delete_expired, (now, cull_limit))
+
+            for filename, in rows:
+                cleanup(filename)
+
+            cull_limit -= len(rows)
+
+            if cull_limit == 0:
+                return
+
+        # Evict keys by policy.
+
+        select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
+
+        if select_policy is None or self.volume() < self.size_limit:
+            return
+
+        select_filename = select_policy.format(fields='filename', now=now)
+        rows = sql(select_filename, (cull_limit,)).fetchall()
+
+        if rows:
+            delete = (
+                'DELETE FROM Cache WHERE rowid IN (%s)'
+                % (select_policy.format(fields='rowid', now=now))
+            )
+            sql(delete, (cull_limit,))
+
+            for filename, in rows:
+                cleanup(filename)
+
+
+    def touch(self, key, expire=None, retry=False):
+        """Touch `key` in cache and update `expire` time.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param float expire: seconds until item expires
+            (default None, no expiry)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: True if key was touched
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        expire_time = None if expire is None else now + expire
+
+        with self._transact(retry) as (sql, _):
+            rows = sql(
+                'SELECT rowid, expire_time FROM Cache'
+                ' WHERE key = ? AND raw = ?',
+                (db_key, raw),
+            ).fetchall()
+
+            if rows:
+                (rowid, old_expire_time), = rows
+
+                if old_expire_time is None or old_expire_time > now:
+                    sql('UPDATE Cache SET expire_time = ? WHERE rowid = ?',
+                        (expire_time, rowid),
+                    )
+                    return True
+
+        return False
+
+
+    def add(self, key, value, expire=None, read=False, tag=None, retry=False):
+        """Add `key` and `value` item to cache.
+
+        Similar to `set`, but only add to cache if key not present.
+
+        Operation is atomic. Only one concurrent add operation for a given key
+        will succeed.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param value: value for item
+        :param float expire: seconds until the key expires
+            (default None, no expiry)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: True if item was added
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        expire_time = None if expire is None else now + expire
+        size, mode, filename, db_value = self._disk.store(value, read, key=key)
+        columns = (expire_time, tag, size, mode, filename, db_value)
+
+        with self._transact(retry, filename) as (sql, cleanup):
+            rows = sql(
+                'SELECT rowid, filename, expire_time FROM Cache'
+                ' WHERE key = ? AND raw = ?',
+                (db_key, raw),
+            ).fetchall()
+
+            if rows:
+                (rowid, old_filename, old_expire_time), = rows
+
+                if old_expire_time is None or old_expire_time > now:
+                    cleanup(filename)
+                    return False
+
+                cleanup(old_filename)
+                self._row_update(rowid, now, columns)
+            else:
+                self._row_insert(db_key, raw, now, columns)
+
+            self._cull(now, sql, cleanup)
+
+            return True
+
+
+    def incr(self, key, delta=1, default=0, retry=False):
+        """Increment value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent increment operations will be
+        counted individually.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param int delta: amount to increment (default 1)
+        :param int default: value if key is missing (default 0)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: new value for item
+        :raises KeyError: if key is not found and default is None
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        select = (
+            'SELECT rowid, expire_time, filename, value FROM Cache'
+            ' WHERE key = ? AND raw = ?'
+        )
+
+        with self._transact(retry) as (sql, cleanup):
+            rows = sql(select, (db_key, raw)).fetchall()
+
+            if not rows:
+                if default is None:
+                    raise KeyError(key)
+
+                value = default + delta
+                columns = (
+                    (None, None) + self._disk.store(value, False, key=key)
+                )
+                self._row_insert(db_key, raw, now, columns)
+                self._cull(now, sql, cleanup)
+                return value
+
+            (rowid, expire_time, filename, value), = rows
+
+            if expire_time is not None and expire_time < now:
+                if default is None:
+                    raise KeyError(key)
+
+                value = default + delta
+                columns = (
+                    (None, None) + self._disk.store(value, False, key=key)
+                )
+                self._row_update(rowid, now, columns)
+                self._cull(now, sql, cleanup)
+                cleanup(filename)
+                return value
+
+            value += delta
+
+            columns = 'store_time = ?, value = ?'
+            update_column = EVICTION_POLICY[self.eviction_policy]['get']
+
+            if update_column is not None:
+                columns += ', ' + update_column.format(now=now)
+
+            update = 'UPDATE Cache SET %s WHERE rowid = ?' % columns
+            sql(update, (now, value, rowid))
+
+            return value
+
+
+    def decr(self, key, delta=1, default=0, retry=False):
+        """Decrement value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent decrement operations will be
+        counted individually.
+
+        Unlike Memcached, negative values are supported. Value may be
+        decremented below zero.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param int delta: amount to decrement (default 1)
+        :param int default: value if key is missing (default 0)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: new value for item
+        :raises KeyError: if key is not found and default is None
+        :raises Timeout: if database timeout occurs
+
+        """
+        return self.incr(key, -delta, default, retry)
+
+
+    def get(self, key, default=None, read=False, expire_time=False, tag=False,
+            retry=False):
+        """Retrieve value from cache. If `key` is missing, return `default`.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param default: value to return if key is missing (default None)
+        :param bool read: if True, return file handle to value
+            (default False)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: value for item or default if key not found
+        :raises Timeout: if database timeout occurs
+
+        """
+        db_key, raw = self._disk.put(key)
+        update_column = EVICTION_POLICY[self.eviction_policy]['get']
+        select = (
+            'SELECT rowid, expire_time, tag, mode, filename, value'
+            ' FROM Cache WHERE key = ? AND raw = ?'
+            ' AND (expire_time IS NULL OR expire_time > ?)'
+        )
+
+        if expire_time and tag:
+            default = (default, None, None)
+        elif expire_time or tag:
+            default = (default, None)
+
+        if not self.statistics and update_column is None:
+            # Fast path, no transaction necessary.
+
+            rows = self._sql(select, (db_key, raw, time.time())).fetchall()
+
+            if not rows:
+                return default
+
+            (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
+
+            try:
+                value = self._disk.fetch(mode, filename, db_value, read)
+            except IOError:
+                # Key was deleted before we could retrieve result.
+                return default
+
+        else:  # Slow path, transaction required.
+            cache_hit = (
+                'UPDATE Settings SET value = value + 1 WHERE key = "hits"'
+            )
+            cache_miss = (
+                'UPDATE Settings SET value = value + 1 WHERE key = "misses"'
+            )
+
+            with self._transact(retry) as (sql, _):
+                rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+                if not rows:
+                    if self.statistics:
+                        sql(cache_miss)
+                    return default
+
+                (rowid, db_expire_time, db_tag,
+                     mode, filename, db_value), = rows  # noqa: E127
+
+                try:
+                    value = self._disk.fetch(mode, filename, db_value, read)
+                except IOError as error:
+                    if error.errno == errno.ENOENT:
+                        # Key was deleted before we could retrieve result.
+                        if self.statistics:
+                            sql(cache_miss)
+                        return default
+                    else:
+                        raise
+
+                if self.statistics:
+                    sql(cache_hit)
+
+                now = time.time()
+                update = 'UPDATE Cache SET %s WHERE rowid = ?'
+
+                if update_column is not None:
+                    sql(update % update_column.format(now=now), (rowid,))
+
+        if expire_time and tag:
+            return (value, db_expire_time, db_tag)
+        elif expire_time:
+            return (value, db_expire_time)
+        elif tag:
+            return (value, db_tag)
+        else:
+            return value
+
+
+    def __getitem__(self, key):
+        """Return corresponding value for `key` from cache.
+
+        :param key: key matching item
+        :return: corresponding value
+        :raises KeyError: if key is not found
+
+        """
+        value = self.get(key, default=ENOVAL, retry=True)
+        if value is ENOVAL:
+            raise KeyError(key)
+        return value
+
+
+    def read(self, key, retry=False):
+        """Return file handle value corresponding to `key` from cache.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key matching item
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: file open for reading in binary mode
+        :raises KeyError: if key is not found
+        :raises Timeout: if database timeout occurs
+
+        """
+        handle = self.get(key, default=ENOVAL, read=True, retry=retry)
+        if handle is ENOVAL:
+            raise KeyError(key)
+        return handle
+
+
+    def __contains__(self, key):
+        """Return `True` if `key` matching item is found in cache.
+
+        :param key: key matching item
+        :return: True if key matching item
+
+        """
+        sql = self._sql
+        db_key, raw = self._disk.put(key)
+        select = (
+            'SELECT rowid FROM Cache'
+            ' WHERE key = ? AND raw = ?'
+            ' AND (expire_time IS NULL OR expire_time > ?)'
+        )
+
+        rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+        return bool(rows)
+
+
+    def pop(self, key, default=None, expire_time=False, tag=False, retry=False):  # noqa: E501
+        """Remove corresponding item for `key` from cache and return value.
+
+        If `key` is missing, return `default`.
+
+        Operation is atomic. Concurrent operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param default: value to return if key is missing (default None)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: value for item or default if key not found
+        :raises Timeout: if database timeout occurs
+
+        """
+        db_key, raw = self._disk.put(key)
+        select = (
+            'SELECT rowid, expire_time, tag, mode, filename, value'
+            ' FROM Cache WHERE key = ? AND raw = ?'
+            ' AND (expire_time IS NULL OR expire_time > ?)'
+        )
+
+        if expire_time and tag:
+            default = default, None, None
+        elif expire_time or tag:
+            default = default, None
+
+        with self._transact(retry) as (sql, _):
+            rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+            if not rows:
+                return default
+
+            (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
+
+            sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+        try:
+            value = self._disk.fetch(mode, filename, db_value, False)
+        except IOError as error:
+            if error.errno == errno.ENOENT:
+                # Key was deleted before we could retrieve result.
+                return default
+            else:
+                raise
+        finally:
+            if filename is not None:
+                self._disk.remove(filename)
+
+        if expire_time and tag:
+            return value, db_expire_time, db_tag
+        elif expire_time:
+            return value, db_expire_time
+        elif tag:
+            return value, db_tag
+        else:
+            return value
+
+
+    def __delitem__(self, key, retry=True):
+        """Delete corresponding item for `key` from cache.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default `True`).
+
+        :param key: key matching item
+        :param bool retry: retry if database timeout occurs (default True)
+        :raises KeyError: if key is not found
+        :raises Timeout: if database timeout occurs
+
+        """
+        db_key, raw = self._disk.put(key)
+
+        with self._transact(retry) as (sql, cleanup):
+            rows = sql(
+                'SELECT rowid, filename FROM Cache'
+                ' WHERE key = ? AND raw = ?'
+                ' AND (expire_time IS NULL OR expire_time > ?)',
+                (db_key, raw, time.time()),
+            ).fetchall()
+
+            if not rows:
+                raise KeyError(key)
+
+            (rowid, filename), = rows
+            sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+            cleanup(filename)
+
+            return True
+
+
+    def delete(self, key, retry=False):
+        """Delete corresponding item for `key` from cache.
+
+        Missing keys are ignored.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key matching item
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: True if item was deleted
+        :raises Timeout: if database timeout occurs
+
+        """
+        try:
+            return self.__delitem__(key, retry=retry)
+        except KeyError:
+            return False
+
+
+    def push(self, value, prefix=None, side='back', expire=None, read=False,
+             tag=None, retry=False):
+        """Push `value` onto `side` of queue identified by `prefix` in cache.
+
+        When prefix is None, integer keys are used. Otherwise, string keys are
+        used in the format "prefix-integer". Integer starts at 500 trillion.
+
+        Defaults to pushing value on back of queue. Set side to 'front' to push
+        value on front of queue. Side must be one of 'back' or 'front'.
+
+        Operation is atomic. Concurrent operations will be serialized.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        See also `Cache.pull`.
+
+        >>> cache = Cache()
+        >>> print(cache.push('first value'))
+        500000000000000
+        >>> cache.get(500000000000000)
+        'first value'
+        >>> print(cache.push('second value'))
+        500000000000001
+        >>> print(cache.push('third value', side='front'))
+        499999999999999
+        >>> cache.push(1234, prefix='userids')
+        'userids-500000000000000'
+
+        :param value: value for item
+        :param str prefix: key prefix (default None, key is integer)
+        :param str side: either 'back' or 'front' (default 'back')
+        :param float expire: seconds until the key expires
+            (default None, no expiry)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key for item in cache
+        :raises Timeout: if database timeout occurs
+
+        """
+        if prefix is None:
+            min_key = 0
+            max_key = 999999999999999
+        else:
+            min_key = prefix + '-000000000000000'
+            max_key = prefix + '-999999999999999'
+
+        now = time.time()
+        raw = True
+        expire_time = None if expire is None else now + expire
+        size, mode, filename, db_value = self._disk.store(value, read)
+        columns = (expire_time, tag, size, mode, filename, db_value)
+        order = {'back': 'DESC', 'front': 'ASC'}
+        select = (
+            'SELECT key FROM Cache'
+            ' WHERE ? < key AND key < ? AND raw = ?'
+            ' ORDER BY key %s LIMIT 1'
+        ) % order[side]
+
+        with self._transact(retry, filename) as (sql, cleanup):
+            rows = sql(select, (min_key, max_key, raw)).fetchall()
+
+            if rows:
+                (key,), = rows
+
+                if prefix is not None:
+                    num = int(key[(key.rfind('-') + 1):])
+                else:
+                    num = key
+
+                if side == 'back':
+                    num += 1
+                else:
+                    assert side == 'front'
+                    num -= 1
+            else:
+                num = 500000000000000
+
+            if prefix is not None:
+                db_key = '{0}-{1:015d}'.format(prefix, num)
+            else:
+                db_key = num
+
+            self._row_insert(db_key, raw, now, columns)
+            self._cull(now, sql, cleanup)
+
+            return db_key
+
+
+    def pull(self, prefix=None, default=(None, None), side='front',
+             expire_time=False, tag=False, retry=False):
+        """Pull key and value item pair from `side` of queue in cache.
+
+        When prefix is None, integer keys are used. Otherwise, string keys are
+        used in the format "prefix-integer". Integer starts at 500 trillion.
+
+        If queue is empty, return default.
+
+        Defaults to pulling key and value item pairs from front of queue. Set
+        side to 'back' to pull from back of queue. Side must be one of 'front'
+        or 'back'.
+
+        Operation is atomic. Concurrent operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        See also `Cache.push` and `Cache.get`.
+
+        >>> cache = Cache()
+        >>> cache.pull()
+        (None, None)
+        >>> for letter in 'abc':
+        ...     print(cache.push(letter))
+        500000000000000
+        500000000000001
+        500000000000002
+        >>> key, value = cache.pull()
+        >>> print(key)
+        500000000000000
+        >>> value
+        'a'
+        >>> _, value = cache.pull(side='back')
+        >>> value
+        'c'
+        >>> cache.push(1234, 'userids')
+        'userids-500000000000000'
+        >>> _, value = cache.pull('userids')
+        >>> value
+        1234
+
+        :param str prefix: key prefix (default None, key is integer)
+        :param default: value to return if key is missing
+            (default (None, None))
+        :param str side: either 'front' or 'back' (default 'front')
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key and value item pair or default if queue is empty
+        :raises Timeout: if database timeout occurs
+
+        """
+        # Caution: Nearly identical code exists in Cache.peek
+        if prefix is None:
+            min_key = 0
+            max_key = 999999999999999
+        else:
+            min_key = prefix + '-000000000000000'
+            max_key = prefix + '-999999999999999'
+
+        order = {'front': 'ASC', 'back': 'DESC'}
+        select = (
+            'SELECT rowid, key, expire_time, tag, mode, filename, value'
+            ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
+            ' ORDER BY key %s LIMIT 1'
+        ) % order[side]
+
+        if expire_time and tag:
+            default = default, None, None
+        elif expire_time or tag:
+            default = default, None
+
+        while True:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select, (min_key, max_key)).fetchall()
+
+                    if not rows:
+                        return default
+
+                    (rowid, key, db_expire, db_tag, mode, name,
+                     db_value), = rows
+
+                    sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+                    if db_expire is not None and db_expire < time.time():
+                        cleanup(name)
+                    else:
+                        break
+
+            try:
+                value = self._disk.fetch(mode, name, db_value, False)
+            except IOError as error:
+                if error.errno == errno.ENOENT:
+                    # Key was deleted before we could retrieve result.
+                    continue
+                raise
+            finally:
+                if name is not None:
+                    self._disk.remove(name)
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+
+    def peek(self, prefix=None, default=(None, None), side='front',
+             expire_time=False, tag=False, retry=False):
+        """Peek at key and value item pair from `side` of queue in cache.
+
+        When prefix is None, integer keys are used. Otherwise, string keys are
+        used in the format "prefix-integer". Integer starts at 500 trillion.
+
+        If queue is empty, return default.
+
+        Defaults to peeking at key and value item pairs from front of queue.
+        Set side to 'back' to peek at back of queue. Side must be one of
+        'front' or 'back'.
+
+        Expired items are deleted from cache. Operation is atomic. Concurrent
+        operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        See also `Cache.pull` and `Cache.push`.
+
+        >>> cache = Cache()
+        >>> for letter in 'abc':
+        ...     print(cache.push(letter))
+        500000000000000
+        500000000000001
+        500000000000002
+        >>> key, value = cache.peek()
+        >>> print(key)
+        500000000000000
+        >>> value
+        'a'
+        >>> key, value = cache.peek(side='back')
+        >>> print(key)
+        500000000000002
+        >>> value
+        'c'
+
+        :param str prefix: key prefix (default None, key is integer)
+        :param default: value to return if key is missing
+            (default (None, None))
+        :param str side: either 'front' or 'back' (default 'front')
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key and value item pair or default if queue is empty
+        :raises Timeout: if database timeout occurs
+
+        """
+        # Caution: Nearly identical code exists in Cache.pull
+        if prefix is None:
+            min_key = 0
+            max_key = 999999999999999
+        else:
+            min_key = prefix + '-000000000000000'
+            max_key = prefix + '-999999999999999'
+
+        order = {'front': 'ASC', 'back': 'DESC'}
+        select = (
+            'SELECT rowid, key, expire_time, tag, mode, filename, value'
+            ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
+            ' ORDER BY key %s LIMIT 1'
+        ) % order[side]
+
+        if expire_time and tag:
+            default = default, None, None
+        elif expire_time or tag:
+            default = default, None
+
+        while True:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select, (min_key, max_key)).fetchall()
+
+                    if not rows:
+                        return default
+
+                    (rowid, key, db_expire, db_tag, mode, name,
+                     db_value), = rows
+
+                    if db_expire is not None and db_expire < time.time():
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+                        cleanup(name)
+                    else:
+                        break
+
+            try:
+                value = self._disk.fetch(mode, name, db_value, False)
+            except IOError as error:
+                if error.errno == errno.ENOENT:
+                    # Key was deleted before we could retrieve result.
+                    continue
+                raise
+            finally:
+                if name is not None:
+                    self._disk.remove(name)
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+
+    def peekitem(self, last=True, expire_time=False, tag=False, retry=False):
+        """Peek at key and value item pair in cache based on iteration order.
+
+        Expired items are deleted from cache. Operation is atomic. Concurrent
+        operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        >>> cache = Cache()
+        >>> for num, letter in enumerate('abc'):
+        ...     cache[letter] = num
+        >>> cache.peekitem()
+        ('c', 2)
+        >>> cache.peekitem(last=False)
+        ('a', 0)
+
+        :param bool last: last item in iteration order (default True)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key and value item pair
+        :raises KeyError: if cache is empty
+        :raises Timeout: if database timeout occurs
+
+        """
+        order = ('ASC', 'DESC')
+        select = (
+            'SELECT rowid, key, raw, expire_time, tag, mode, filename, value'
+            ' FROM Cache ORDER BY rowid %s LIMIT 1'
+        ) % order[last]
+
+        while True:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select).fetchall()
+
+                    if not rows:
+                        raise KeyError('dictionary is empty')
+
+                    (rowid, db_key, raw, db_expire, db_tag, mode, name,
+                     db_value), = rows
+
+                    if db_expire is not None and db_expire < time.time():
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+                        cleanup(name)
+                    else:
+                        break
+
+            key = self._disk.get(db_key, raw)
+
+            try:
+                value = self._disk.fetch(mode, name, db_value, False)
+            except IOError as error:
+                if error.errno == errno.ENOENT:
+                    # Key was deleted before we could retrieve result.
+                    continue
+                raise
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+
+    def memoize(self, name=None, typed=False, expire=None, tag=None):
+        """Memoizing cache decorator.
+
+        Decorator to wrap callable with memoizing function using cache.
+        Repeated calls with the same arguments will lookup result in cache and
+        avoid function evaluation.
+
+        If name is set to None (default), the callable name will be determined
+        automatically.
+
+        When expire is set to zero, function results will not be set in the
+        cache. Cache lookups still occur, however. Read
+        :doc:`case-study-landing-page-caching` for example usage.
+
+        If typed is set to True, function arguments of different types will be
+        cached separately. For example, f(3) and f(3.0) will be treated as
+        distinct calls with distinct results.
+
+        The original underlying function is accessible through the __wrapped__
+        attribute. This is useful for introspection, for bypassing the cache,
+        or for rewrapping the function with a different cache.
+
+        >>> from diskcache import Cache
+        >>> cache = Cache()
+        >>> @cache.memoize(expire=1, tag='fib')
+        ... def fibonacci(number):
+        ...     if number == 0:
+        ...         return 0
+        ...     elif number == 1:
+        ...         return 1
+        ...     else:
+        ...         return fibonacci(number - 1) + fibonacci(number - 2)
+        >>> print(fibonacci(100))
+        354224848179261915075
+
+        An additional `__cache_key__` attribute can be used to generate the
+        cache key used for the given arguments.
+
+        >>> key = fibonacci.__cache_key__(100)
+        >>> print(cache[key])
+        354224848179261915075
+
+        Remember to call memoize when decorating a callable. If you forget,
+        then a TypeError will occur. Note the lack of parentheses after
+        memoize below:
+
+        >>> @cache.memoize
+        ... def test():
+        ...     pass
+        Traceback (most recent call last):
+            ...
+        TypeError: name cannot be callable
+
+        :param cache: cache to store callable arguments and return values
+        :param str name: name given for callable (default None, automatic)
+        :param bool typed: cache different types separately (default False)
+        :param float expire: seconds until arguments expire
+            (default None, no expiry)
+        :param str tag: text to associate with arguments (default None)
+        :return: callable decorator
+
+        """
+        # Caution: Nearly identical code exists in DjangoCache.memoize
+        if callable(name):
+            raise TypeError('name cannot be callable')
+
+        def decorator(func):
+            "Decorator created by memoize() for callable `func`."
+            base = (full_name(func),) if name is None else (name,)
+
+            @ft.wraps(func)
+            def wrapper(*args, **kwargs):
+                "Wrapper for callable to cache arguments and return values."
+                key = wrapper.__cache_key__(*args, **kwargs)
+                result = self.get(key, default=ENOVAL, retry=True)
+
+                if result is ENOVAL:
+                    result = func(*args, **kwargs)
+                    if expire is None or expire > 0:
+                        self.set(key, result, expire, tag=tag, retry=True)
+
+                return result
+
+            def __cache_key__(*args, **kwargs):
+                "Make key for cache given function arguments."
+                return args_to_key(base, args, kwargs, typed)
+
+            wrapper.__cache_key__ = __cache_key__
+            return wrapper
+
+        return decorator
+
+
+    def check(self, fix=False, retry=False):
+        """Check database and file system consistency.
+
+        Intended for use in testing and post-mortem error analysis.
+
+        While checking the Cache table for consistency, a writer lock is held
+        on the database. The lock blocks other cache clients from writing to
+        the database. For caches with many file references, the lock may be
+        held for a long time. For example, local benchmarking shows that a
+        cache with 1,000 file references takes ~60ms to check.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param bool fix: correct inconsistencies
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: list of warnings
+        :raises Timeout: if database timeout occurs
+
+        """
+        # pylint: disable=access-member-before-definition,W0201
+        with warnings.catch_warnings(record=True) as warns:
+            sql = self._sql
+
+            # Check integrity of database.
+
+            rows = sql('PRAGMA integrity_check').fetchall()
+
+            if len(rows) != 1 or rows[0][0] != u'ok':
+                for message, in rows:
+                    warnings.warn(message)
+
+            if fix:
+                sql('VACUUM')
+
+            with self._transact(retry) as (sql, _):
+
+                # Check Cache.filename against file system.
+
+                filenames = set()
+                select = (
+                    'SELECT rowid, size, filename FROM Cache'
+                    ' WHERE filename IS NOT NULL'
+                )
+
+                rows = sql(select).fetchall()
+
+                for rowid, size, filename in rows:
+                    full_path = op.join(self._directory, filename)
+                    filenames.add(full_path)
+
+                    if op.exists(full_path):
+                        real_size = op.getsize(full_path)
+
+                        if size != real_size:
+                            message = 'wrong file size: %s, %d != %d'
+                            args = full_path, real_size, size
+                            warnings.warn(message % args)
+
+                            if fix:
+                                sql('UPDATE Cache SET size = ?'
+                                    ' WHERE rowid = ?',
+                                    (real_size, rowid),
+                                )
+
+                        continue
+
+                    warnings.warn('file not found: %s' % full_path)
+
+                    if fix:
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+                # Check file system against Cache.filename.
+
+                for dirpath, _, files in os.walk(self._directory):
+                    paths = [op.join(dirpath, filename) for filename in files]
+                    error = set(paths) - filenames
+
+                    for full_path in error:
+                        if DBNAME in full_path:
+                            continue
+
+                        message = 'unknown file: %s' % full_path
+                        warnings.warn(message, UnknownFileWarning)
+
+                        if fix:
+                            os.remove(full_path)
+
+                # Check for empty directories.
+
+                for dirpath, dirs, files in os.walk(self._directory):
+                    if not (dirs or files):
+                        message = 'empty directory: %s' % dirpath
+                        warnings.warn(message, EmptyDirWarning)
+
+                        if fix:
+                            os.rmdir(dirpath)
+
+                # Check Settings.count against count of Cache rows.
+
+                self.reset('count')
+                (count,), = sql('SELECT COUNT(key) FROM Cache').fetchall()
+
+                if self.count != count:
+                    message = 'Settings.count != COUNT(Cache.key); %d != %d'
+                    warnings.warn(message % (self.count, count))
+
+                    if fix:
+                        sql('UPDATE Settings SET value = ? WHERE key = ?',
+                            (count, 'count'),
+                        )
+
+                # Check Settings.size against sum of Cache.size column.
+
+                self.reset('size')
+                select_size = 'SELECT COALESCE(SUM(size), 0) FROM Cache'
+                (size,), = sql(select_size).fetchall()
+
+                if self.size != size:
+                    message = 'Settings.size != SUM(Cache.size); %d != %d'
+                    warnings.warn(message % (self.size, size))
+
+                    if fix:
+                        sql('UPDATE Settings SET value = ? WHERE key =?',
+                            (size, 'size'),
+                        )
+
+            return warns
+
+
+    def create_tag_index(self):
+        """Create tag index on cache database.
+
+        It is better to initialize cache with `tag_index=True` than use this.
+
+        :raises Timeout: if database timeout occurs
+
+        """
+        sql = self._sql
+        sql('CREATE INDEX IF NOT EXISTS Cache_tag_rowid ON Cache(tag, rowid)')
+        self.reset('tag_index', 1)
+
+
+    def drop_tag_index(self):
+        """Drop tag index on cache database.
+
+        :raises Timeout: if database timeout occurs
+
+        """
+        sql = self._sql
+        sql('DROP INDEX IF EXISTS Cache_tag_rowid')
+        self.reset('tag_index', 0)
+
+
+    def evict(self, tag, retry=False):
+        """Remove items with matching `tag` from cache.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param str tag: tag identifying items
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: count of rows removed
+        :raises Timeout: if database timeout occurs
+
+        """
+        select = (
+            'SELECT rowid, filename FROM Cache'
+            ' WHERE tag = ? AND rowid > ?'
+            ' ORDER BY rowid LIMIT ?'
+        )
+        args = [tag, 0, 100]
+        return self._select_delete(select, args, arg_index=1, retry=retry)
+
+
+    def expire(self, now=None, retry=False):
+        """Remove expired items from cache.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param float now: current time (default None, ``time.time()`` used)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: count of items removed
+        :raises Timeout: if database timeout occurs
+
+        """
+        select = (
+            'SELECT rowid, expire_time, filename FROM Cache'
+            ' WHERE ? < expire_time AND expire_time < ?'
+            ' ORDER BY expire_time LIMIT ?'
+        )
+        args = [0, now or time.time(), 100]
+        return self._select_delete(select, args, row_index=1, retry=retry)
+
+
+    def cull(self, retry=False):
+        """Cull items from cache until volume is less than size limit.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: count of items removed
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+
+        # Remove expired items.
+
+        count = self.expire(now)
+
+        # Remove items by policy.
+
+        select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
+
+        if select_policy is None:
+            return
+
+        select_filename = select_policy.format(fields='filename', now=now)
+
+        try:
+            while self.volume() > self.size_limit:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select_filename, (10,)).fetchall()
+
+                    if not rows:
+                        break
+
+                    count += len(rows)
+                    delete = (
+                        'DELETE FROM Cache WHERE rowid IN (%s)'
+                        % select_policy.format(fields='rowid', now=now)
+                    )
+                    sql(delete, (10,))
+
+                    for filename, in rows:
+                        cleanup(filename)
+        except Timeout:
+            raise Timeout(count) from None
+
+        return count
+
+
+    def clear(self, retry=False):
+        """Remove all items from cache.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: count of rows removed
+        :raises Timeout: if database timeout occurs
+
+        """
+        select = (
+            'SELECT rowid, filename FROM Cache'
+            ' WHERE rowid > ?'
+            ' ORDER BY rowid LIMIT ?'
+        )
+        args = [0, 100]
+        return self._select_delete(select, args, retry=retry)
+
+
+    def _select_delete(self, select, args, row_index=0, arg_index=0,
+                       retry=False):
+        count = 0
+        delete = 'DELETE FROM Cache WHERE rowid IN (%s)'
+
+        try:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select, args).fetchall()
+
+                    if not rows:
+                        break
+
+                    count += len(rows)
+                    sql(delete % ','.join(str(row[0]) for row in rows))
+
+                    for row in rows:
+                        args[arg_index] = row[row_index]
+                        cleanup(row[-1])
+
+        except Timeout:
+            raise Timeout(count) from None
+
+        return count
+
+
+    def iterkeys(self, reverse=False):
+        """Iterate Cache keys in database sort order.
+
+        >>> cache = Cache()
+        >>> for key in [4, 1, 3, 0, 2]:
+        ...     cache[key] = key
+        >>> list(cache.iterkeys())
+        [0, 1, 2, 3, 4]
+        >>> list(cache.iterkeys(reverse=True))
+        [4, 3, 2, 1, 0]
+
+        :param bool reverse: reverse sort order (default False)
+        :return: iterator of Cache keys
+
+        """
+        sql = self._sql
+        limit = 100
+        _disk_get = self._disk.get
+
+        if reverse:
+            select = (
+                'SELECT key, raw FROM Cache'
+                ' ORDER BY key DESC, raw DESC LIMIT 1'
+            )
+            iterate = (
+                'SELECT key, raw FROM Cache'
+                ' WHERE key = ? AND raw < ? OR key < ?'
+                ' ORDER BY key DESC, raw DESC LIMIT ?'
+            )
+        else:
+            select = (
+                'SELECT key, raw FROM Cache'
+                ' ORDER BY key ASC, raw ASC LIMIT 1'
+            )
+            iterate = (
+                'SELECT key, raw FROM Cache'
+                ' WHERE key = ? AND raw > ? OR key > ?'
+                ' ORDER BY key ASC, raw ASC LIMIT ?'
+            )
+
+        row = sql(select).fetchall()
+
+        if row:
+            (key, raw), = row
+        else:
+            return
+
+        yield _disk_get(key, raw)
+
+        while True:
+            rows = sql(iterate, (key, raw, key, limit)).fetchall()
+
+            if not rows:
+                break
+
+            for key, raw in rows:
+                yield _disk_get(key, raw)
+
+
+    def _iter(self, ascending=True):
+        sql = self._sql
+        rows = sql('SELECT MAX(rowid) FROM Cache').fetchall()
+        (max_rowid,), = rows
+        yield  # Signal ready.
+
+        if max_rowid is None:
+            return
+
+        bound = max_rowid + 1
+        limit = 100
+        _disk_get = self._disk.get
+        rowid = 0 if ascending else bound
+        select = (
+            'SELECT rowid, key, raw FROM Cache'
+            ' WHERE ? < rowid AND rowid < ?'
+            ' ORDER BY rowid %s LIMIT ?'
+        ) % ('ASC' if ascending else 'DESC')
+
+        while True:
+            if ascending:
+                args = (rowid, bound, limit)
+            else:
+                args = (0, rowid, limit)
+
+            rows = sql(select, args).fetchall()
+
+            if not rows:
+                break
+
+            for rowid, key, raw in rows:
+                yield _disk_get(key, raw)
+
+
+    def __iter__(self):
+        "Iterate keys in cache including expired items."
+        iterator = self._iter()
+        next(iterator)
+        return iterator
+
+
+    def __reversed__(self):
+        "Reverse iterate keys in cache including expired items."
+        iterator = self._iter(ascending=False)
+        next(iterator)
+        return iterator
+
+
+    def stats(self, enable=True, reset=False):
+        """Return cache statistics hits and misses.
+
+        :param bool enable: enable collecting statistics (default True)
+        :param bool reset: reset hits and misses to 0 (default False)
+        :return: (hits, misses)
+
+        """
+        # pylint: disable=E0203,W0201
+        result = (self.reset('hits'), self.reset('misses'))
+
+        if reset:
+            self.reset('hits', 0)
+            self.reset('misses', 0)
+
+        self.reset('statistics', enable)
+
+        return result
+
+
+    def volume(self):
+        """Return estimated total size of cache on disk.
+
+        :return: size in bytes
+
+        """
+        (page_count,), = self._sql('PRAGMA page_count').fetchall()
+        total_size = self._page_size * page_count + self.reset('size')
+        return total_size
+
+
+    def close(self):
+        """Close database connection.
+
+        """
+        con = getattr(self._local, 'con', None)
+
+        if con is None:
+            return
+
+        con.close()
+
+        try:
+            delattr(self._local, 'con')
+        except AttributeError:
+            pass
+
+
+    def __enter__(self):
+        # Create connection in thread.
+        # pylint: disable=unused-variable
+        connection = self._con  # noqa
+        return self
+
+
+    def __exit__(self, *exception):
+        self.close()
+
+
+    def __len__(self):
+        "Count of items in cache including expired items."
+        return self.reset('count')
+
+
+    def __getstate__(self):
+        return (self.directory, self.timeout, type(self.disk))
+
+
+    def __setstate__(self, state):
+        self.__init__(*state)
+
+
+    def reset(self, key, value=ENOVAL, update=True):
+        """Reset `key` and `value` item from Settings table.
+
+        Use `reset` to update the value of Cache settings correctly. Cache
+        settings are stored in the Settings table of the SQLite database. If
+        `update` is ``False`` then no attempt is made to update the database.
+
+        If `value` is not given, it is reloaded from the Settings
+        table. Otherwise, the Settings table is updated.
+
+        Settings with the ``disk_`` prefix correspond to Disk
+        attributes. Updating the value will change the unprefixed attribute on
+        the associated Disk instance.
+
+        Settings with the ``sqlite_`` prefix correspond to SQLite
+        pragmas. Updating the value will execute the corresponding PRAGMA
+        statement.
+
+        SQLite PRAGMA statements may be executed before the Settings table
+        exists in the database by setting `update` to ``False``.
+
+        :param str key: Settings key for item
+        :param value: value for item (optional)
+        :param bool update: update database Settings table (default True)
+        :return: updated value for item
+        :raises Timeout: if database timeout occurs
+
+        """
+        sql = self._sql
+        sql_retry = self._sql_retry
+
+        if value is ENOVAL:
+            select = 'SELECT value FROM Settings WHERE key = ?'
+            (value,), = sql_retry(select, (key,)).fetchall()
+            setattr(self, key, value)
+            return value
+
+        if update:
+            statement = 'UPDATE Settings SET value = ? WHERE key = ?'
+            sql_retry(statement, (value, key))
+
+        if key.startswith('sqlite_'):
+            pragma = key[7:]
+
+            # 2016-02-17 GrantJ - PRAGMA and isolation_level=None
+            # don't always play nicely together. Retry setting the
+            # PRAGMA. I think some PRAGMA statements expect to
+            # immediately take an EXCLUSIVE lock on the database. I
+            # can't find any documentation for this but without the
+            # retry, stress will intermittently fail with multiple
+            # processes.
+
+            # 2018-11-05 GrantJ - Avoid setting pragma values that
+            # are already set. Pragma settings like auto_vacuum and
+            # journal_mode can take a long time or may not work after
+            # tables have been created.
+
+            start = time.time()
+            while True:
+                try:
+                    try:
+                        (old_value,), = sql('PRAGMA %s' % (pragma)).fetchall()
+                        update = old_value != value
+                    except ValueError:
+                        update = True
+                    if update:
+                        sql('PRAGMA %s = %s' % (pragma, value)).fetchall()
+                    break
+                except sqlite3.OperationalError as exc:
+                    if str(exc) != 'database is locked':
+                        raise
+                    diff = time.time() - start
+                    if diff > 60:
+                        raise
+                    time.sleep(0.001)
+        elif key.startswith('disk_'):
+            attr = key[5:]
+            setattr(self._disk, attr, value)
+
+        setattr(self, key, value)
+        return value
+
+
+
+class LookupModule(LookupBase):
+    def run(self, terms, variables=None, **kwargs):
+        lookup_name, terms = terms[0], terms[1:]
+
+        with Cache(os.path.join(C.DEFAULT_LOCAL_TMP, "cached_lookup")) as cache:
+            key = (lookup_name, terms, kwargs)
+
+            try:
+                result = cache[key]
+                display.verbose("'cached' lookup cache hit for %r" % (key,))
+            except KeyError:
+                # Based on
+                # https://github.com/ansible/ansible/blob/v2.6.1/lib/ansible/vars/manager.py#L495
+                lookup = lookup_loader.get(
+                    lookup_name, loader=self._loader, templar=self._templar
+                )
+                if lookup is None:
+                    raise AnsibleError("lookup plugin (%s) not found" % lookup_name)
+
+                result = lookup.run(terms, variables=variables, **kwargs)
+                cache[key] = result
+                display.verbose("'cached' lookup cache miss for %r" % (key,))
+
+        return result
diff --git a/playbook-router.yml b/playbook-router.yml
@@ -0,0 +1,306 @@
+---
+- hosts: lollo
+  remote_user: root
+  gather_facts: false
+  tasks:
+    - name: "[Alpine] Install Python"
+      raw: test -e /usr/bin/python || (test -e /sbin/apk && apk update && apk add python3; true)
+    - name: "[Archlinux] Install Python"
+      raw: test -e /usr/bin/python || (test -e /usr/bin/pacman && pacman -Sy --noconfirm python; true)
+
+
+- hosts: lollo
+  name:  Install lollo
+  roles: 
+    - role: common
+      tags: common
+    - role: kawaidesu.ansible_networkd
+      tags: systemd-networkd
+    - role: acme-redirect
+      tags: acme-redirect
+    - role: nginx
+      tags: nginx
+    - role: hostapd
+      tags: hostapd
+    - role: dnsmasq
+      tags: dnsmasq
+    - role: syncthing
+      tags: syncthing
+    - role: frp
+      tags:
+        - frp
+        - frpc
+        - frps 
+
+  vars:
+    system:
+      hostname: lollo
+      domain: ctu.cx
+      timezone: Europe/Berlin
+      enableOwnRepos: true
+      enableSSH: true
+      enableSudo: true
+      useNTP: true #todo: support archlinux
+      nameservers:
+        - 1.1.1.1
+        - 8.8.8.8
+      users:
+        - name: leah
+          groups: "wheel"
+          password: "$6$foobar123$1qcCmnoveirSdWY9XdgH5hCXv32hj0n/AyJX46sSp1LyGCA8QT/xxifebRxr89uIH6vwhzFGgz4.H2sG0en0f0"
+          sshKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw/G6x8H3ojvHx3NsTswBMMmOhp48F3rea0GUniKSvRLMRIti5b7Q4P4FXnkQEtuNSR3u7gE5r4EacaLaIx7Az9SgHRoE+hdzSo4mPAwKTx/E3HZgIjdZhTDL8PAn4SZZT6RBqr/uGb+x9fdIjY0FbdNBLjq0MNnG3T+qd1joUL8JXoS7F//ac52RhHlsA5qJXFDOhpqR/7hRMwOFNH0GKaLN1xQKcOjhpIcdswpOf8kRDVpT7xOYwfXCFF4MaY2M8047WKarvEnGdADIIw6bvWsdJINehtOQmYEFRaMuaWp1d9bglZXZKPQKNubv5lqneMP4AI7ImDYjgW6eNLIT1 cardno:000603502829\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGrvhqC/tZzpLMs/qy+1xNSVi2mfn8LXPIEhh7dcGn9e isa@Isabelles-MacBook-Pro.local"
+
+    network:
+      ipForwarding: true
+      ferm:
+        enable: true
+        configFile: config-files/ferm/lollo.conf
+
+    networkd:
+      networkd_resolv_conf_content:
+        - nameserver 1.1.1.1
+        - nameserver 8.8.8.8
+      networkd_apply_action: "restart"
+      netdev:
+        - name: enp2s0.5
+          priority: 20
+          content:
+            - NetDev:
+              - Name: enp2s0.5
+              - Kind: vlan
+            - VLAN:
+              - Id: 5
+        - name: wg-pbb
+          priority: 30
+          content:
+            - NetDev:
+              - Name: wg-pbb
+              - Kind: wireguard
+            - WireGuard:
+              - PrivateKey: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/lollo/wireguard.privkey returnall=true') }}"
+              - FirewallMark: 51820
+            - WireGuardPeer:
+              - PublicKey: "{{ lookup('diskcache', 'community.general.passwordstore', 'server/desastro/wireguard.pubkey returnall=true') }}"
+              - AllowedIPs:  "0.0.0.0/0, ::/0"
+              - Endpoint: "195.39.247.172:51820"
+              - PersistentKeepalive: 10
+        - name: brlan
+          priority: 40 
+          content:
+            - NetDev:
+              - Name: brlan
+              - Kind: bridge
+      network:
+        - name: enp2s0
+          priority: 20
+          content:
+            - Match:
+              - Name: enp2s0
+            - Network:
+              - DHCP: yes
+              - VLAN: enp2s0.5
+        - name: enp2s0.5
+          priority: 20
+          content:
+            - Match:
+              - Name: enp2s0.5
+            - Network:
+              - Bridge: brlan
+        - name: wg-pbb
+          priority: 30
+          content:
+            - Match:
+              - Name: wg-pbb
+            - Link:
+              - MTUBytes: 1472
+            - Network:
+              - Address: 195.39.246.32/32
+              - Address: 2a0f:4ac0:acab::1/128
+            - Route:
+              - Destination: 0.0.0.0/0
+              - Table: 1234
+            - Route:
+              - Destination: ::/0
+              - Table: 1234
+        - name: brlan
+          priority: 40
+          content:
+            - Match:
+              - Name: brlan
+              - Driver: bridge
+            - Network:
+              - DHCP: no
+              - Address: 195.39.246.32/28
+              - Address: 10.0.0.1/24
+              - Address: 2a0f:4ac0:acab::1/48
+            - RoutingPolicyRule:
+              - From: 195.39.246.32/28
+              - Table: 254
+              - Priority: 1900
+              - SuppressPrefixLength: 0
+            - RoutingPolicyRule:
+              - From: 2a0f:4ac0:acab::/48
+              - Table: 254
+              - Priority: 1900
+              - SuppressPrefixLength: 0
+            - RoutingPolicyRule:
+              - From: 195.39.246.32/28
+              - Table: 1234
+              - Priority: 2000
+            - RoutingPolicyRule:
+              - From: 2a0f:4ac0:acab::/48
+              - Table: 1234
+              - Priority: 2000
+        - name: usb-tetherring
+          priority: 91
+          content:
+            - Match:
+              - Name: enp*s*u*
+            - Network:
+              - DHCP: yes
+
+    services:
+      prometheus_node_exporter:
+        enable: true
+
+      acme_redirect:
+        enable: true
+        email: lets-encrypt@ctu.cx
+        acme_url: https://api.buypass.com/acme/directory
+        certs:
+          lollo.ctu.cx:
+            dns_names: 
+              - lollo.ctu.cx
+            renew_tasks:
+              - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/lollo.ctu.cx
+          syncthing.lollo.ctu.cx:
+            dns_names: 
+              - syncthing.lollo.ctu.cx
+            renew_tasks:
+              - chown -R acme-redirect:acme-redirect /var/lib/acme-redirect/live/syncthing.lollo.ctu.cx
+
+      nginx:
+        enable: true
+        sslOnly: true
+        vhosts:
+          lollo.ctu.cx:
+            defaultserver: true
+            root: /var/lib/websites/lollo.ctu.cx
+            extraConfig: "
+              index index.html index.php;
+              try_files $uri $uri/ /index.php?$query_string;
+            "
+            ssl:
+              enable: true
+              cert: "/var/lib/acme-redirect/live/lollo.ctu.cx/fullchain"
+              privkey: "/var/lib/acme-redirect/live/lollo.ctu.cx/privkey"
+            locations:
+              - path: /node-exporter
+                proxy: http://127.0.0.1:9100/metrics
+              - path: ~ \.php$
+                extraConfig: "
+                  fastcgi_pass unix:/run/php-fpm/php-fpm.sock;
+                  fastcgi_index index.php;
+                  include fastcgi_params;
+                "
+
+      hostapd:
+        enable: true
+        interface: wlp3s0
+        bridge: brlan
+        channel: 1
+        ssid: legacy.home.ctu.cx
+        passphrase: "{{ lookup('diskcache', 'community.general.passwordstore', 'WiFi/legacy.home.ctu.cx returnall=true')}}"
+
+      dnsmasq:
+        enable: true
+        local_service: true
+        no_resolv: true
+        domain_needed: true
+        bogus_priv: true
+        expand_hosts: false
+        read_ethers: false
+        enable_ra: true
+        quiet_ra: true
+        domain: home.ctu.cx
+        auth_zone:
+          - home.ctu.cx,              10.0.0.1/24, 195.39.246.33/28, 2a0f:4ac0:acab::1/64
+          - home.flauschekatze.space, 10.0.0.1/24, 195.39.246.33/28, 2a0f:4ac0:acab::1/64
+        local_addresses:
+          - /fritz.box/192.168.178.1
+          - /intel-nuc/192.168.178.21
+          - /lollo/192.168.178.20
+          - /repo-vm/192.168.178.24
+          - /mastodon-backup/192.168.178.25
+          - /foo-nuc/192.168.178.23
+          - /lollo.ctu.cx/10.0.0.1
+          - /home.ctu.cx/10.0.0.1
+          - /home.flauschekatze.space/10.0.0.1
+        addresses:
+          - home.ctu.cx,                    195.39.246.32
+          - home.flauschekatze.space,       195.39.246.32
+        dns_servers:
+          - 1.1.1.1
+          - 1.0.0.1
+          - 8.8.8.8
+          - 8.8.4.4
+        dhcp:
+          authoritative: true
+          rapid_commit: true
+          sequential_ip: true
+          options:
+            - option6:information-refresh-time,6h
+            - option6:dns-server,[2a0f:4ac0:acab::1]
+            - option:dns-server,10.0.0.1
+            - option:router,10.0.0.1
+          ranges:
+            - 195.39.246.33,       195.39.246.42,                        255.255.255.240, 48h
+            - 10.0.0.32,           10.0.0.160,                           255.255.255.0,   48h
+            - 2a0f:4ac0:acab::100, 2a0f:4ac0:acab::01ff, ra-names,slaac, 64,              48h
+          hosts:
+            # ctucx macbook
+            - id:00:01:00:01:27:51:55:30:80:e6:50:21:e0:6a,              toaster,          [2a0f:4ac0:acab::33]
+            - 80:e6:50:21:e0:6a,                                         toaster,          195.39.246.33
+            # ctucx thinkcentre
+            - id:00:01:00:01:27:60:18:8c:e8:6a:64:f4:49:e7,              stasicontainer,   [2a0f:4ac0:acab::39]
+            - e8:6a:64:f4:49:e7,                                         stasicontainer,   195.39.246.39
+            # ctucx thinkpad (mac: wlan, eth)
+            - id:00:04:37:8e:fd:cc:26:b8:11:b2:a8:5c:b8:77:0b:6e:a2:e6,  coladose,         [2a0f:4ac0:acab::35]
+            - 7c:2a:31:fb:e6:b8, 8c:16:45:da:61:8e,                      coladose,         195.39.246.35
+            # isa macbook
+            - id:00:01:00:01:23:53:5d:7e:6c:40:08:af:2e:9c,              isabelles-mbp,    [2a0f:4ac0:acab::38]
+            - 6c:40:08:af:2e:9c,                                         isabelles-mbp,    195.39.246.38
+            # isa thinkpad (x230)
+            - id:00:04:e8:51:c5:1d:f6:53:58:4a:9b:c0:28:59:a4:c7:76:32,  isa-x230,         [2a0f:4ac0:acab::36]
+            - 64:80:99:75:c5:5c,                                         isa-x230,         195.39.246.36
+        extraConfig: "
+          auth-ttl=600\n
+          auth-server=home.ctu.cx,wg-pbb\n
+          auth-server=home.flauschekatze.space,wg-pbb\n
+        "
+
+      syncthing:
+        enable: true
+        user: leah
+        nginx:
+          enable: true
+          domain: "syncthing.lollo.ctu.cx"
+          sslOnly: true
+          ssl:
+            enable: true
+            cert: "/var/lib/acme-redirect/live/syncthing.lollo.ctu.cx/fullchain"
+            privkey: "/var/lib/acme-redirect/live/syncthing.lollo.ctu.cx/privkey"
+
+      frpc:
+        enable: true
+        serverAddress: wanderduene.ctu.cx
+        serverPort: 5050
+        token: "{{ lookup('diskcache', 'community.general.passwordstore', 'Server/wanderduene/frps/token returnall=true')}}"
+        dashboard: false
+        tunnels:
+          - name: lollo-ssh
+            type: tcp
+            local_ip: 127.0.0.1
+            local_port: 22
+            remote_port: 2202
+    
diff --git a/playbook-servers.yml b/playbook-servers.yml
@@ -0,0 +1,84 @@
+---
+- hosts: all
+  remote_user: root
+  gather_facts: false
+  tasks:
+    - name: "[Alpine] Install Python"
+      raw: test -e /usr/bin/python || (test -e /sbin/apk && apk update && apk add python3; true)
+    - name: "[Archlinux] Install Python"
+      raw: test -e /usr/bin/python || (test -e /usr/bin/pacman && pacman -Sy --noconfirm python; true)
+
+- hosts: wanderduene
+  name:  Install wanderduene
+  vars_files: configuration/wanderduene.yml
+  roles: 
+    - role: common            # supports: alpine, arch
+      tags: common
+    - role: bind              # supports: alpine, arch(untested)
+      tags: bind
+    - role: acme-redirect     # supports: alpine, arch
+      tags: acme-redirect
+    - role: nginx             # supports: alpine, arch
+      tags: nginx
+    - role: gitolite          # supports: alpine, arch(untested)
+      tags: gitolite
+    - role: cgit              # supports: alpine, arch(untested)
+      tags: cgit
+    - role: oeffisearch       # supports: alpine
+      tags: oeffisearch
+    - role: oeffi-web         # supports: alpine
+      tags: oeffi-web
+    - role: maddy             # supports: alpine
+      tags: maddy
+    - role: radicale          # supports: alpine, arch(untested)
+      tags: radicale
+    - role: websites          # supports: alpine, arch(untested)
+      tags: websites
+    - role: pleroma           # supports: alpine
+      tags: pleroma
+    - role: synapse           # supports: alpine, arch(untested)
+      tags: synapse
+    - role: prometheus        # supports: alpine, arch(untested)
+      tags: prometheus
+    - role: grafana           # supports: alpine, arch(untested)
+      tags: grafana
+    - role: frp               # frps supports: alpine, arch(untested)
+      tags: [ frp, frps ]
+    - role: backup            # todo
+      tags: backup
+
+
+- hosts: taurus
+  name: Install taurus
+  vars_files: configuration/taurus.yml
+  roles:
+    - role: common            # supports: alpine, arch
+      tags: common
+    - role: bind              # supports: alpine, arch(untested)
+      tags: bind
+    - role: acme-redirect     # supports: alpine, arch
+      tags: acme-redirect
+    - role: nginx             # supports: alpine, arch
+      tags: nginx
+    - role: syncthing         # supports: alpine, arch
+      tags: syncthing
+    - role: websites          # todo 
+      tags: websites
+    - role: rest-server       # supports: alpine, arch(untested)
+      vars:
+        rest_server:
+          nginx:
+            password: "{}"
+      tags: [ backup, rest-server, restic ]
+
+
+- hosts: joguhrtbecher
+  name: Install joguhrtbecher
+  vars_files: configuration/joguhrtbecher.yml
+  roles:
+    - role: common            # supports: alpine, arch
+      tags: common
+    - role: nginx             # supports: alpine, arch
+      tags: nginx
+    - role: syncthing         # supports: alpine, arch
+      tags: syncthing+
\ No newline at end of file
diff --git a/roles/acme-redirect/files/awall-rule.json b/roles/acme-redirect/files/awall-rule.json
@@ -0,0 +1,13 @@
+{
+  "description": "Allow HTTP on WAN (for acme-redirect)",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "http",
+      "action": "accept"
+    }
+  ]
+}
diff --git a/roles/acme-redirect/tasks/main.yml b/roles/acme-redirect/tasks/main.yml
@@ -0,0 +1,234 @@
+---
+
+# install it 
+
+- name: "[Alpine] Install package: acme-redirect"
+  apk:
+    name: acme-redirect
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.acme_redirect.enable is true
+
+- name: "[Archlinux] Install package: acme-redirect"
+  pacman:
+    name: acme-redirect
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.acme_redirect.enable is true
+
+
+# configure it 
+
+- name: "[Alpine] create sudoers file for acme-redirect"
+  copy:
+    content: "acme-redirect ALL=NOPASSWD:/sbin/rc-service\n"
+    dest: /etc/sudoers.d/acme-redirect
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.acme_redirect.enable is true
+
+- name: "[Archlinux] create sudoers file for acme-redirect"
+  copy:
+    content: "acme-redirect ALL=NOPASSWD:/usr/bin/systemctl\n"
+    dest: /etc/sudoers.d/acme-redirect
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.acme_redirect.enable is true
+
+- name: Create acme-redirect.conf
+  template:
+    src: acme-redirect-general.conf.j2
+    dest: /etc/acme-redirect.conf
+    owner: acme-redirect
+    group: acme-redirect
+  when: 
+    - services.acme_redirect.enable is true
+
+- name: clean cert-config directory
+  file:
+    state: "{{ item }}"
+    path: /etc/acme-redirect.d
+    owner: acme-redirect
+    group: acme-redirect
+    mode: 0755
+  with_items:
+    - absent
+    - directory
+  when: 
+    - services.acme_redirect.enable is true
+    - services.acme_redirect.certs is defined
+
+- name: Generate acme-redirect cert configs
+  template:
+    src: acme-redirect.conf.j2
+    dest: /etc/acme-redirect.d/{{item.key}}.conf
+    owner: acme-redirect
+    group: acme-redirect
+    mode: 0644
+  loop: "{{ lookup('dict', services.acme_redirect.certs, wantlist=True) }}"
+  when: 
+    - services.acme_redirect.enable is true
+    - services.acme_redirect.certs is defined
+
+
+# firewall it 
+
+- name: "[awall] Create rule for: acme-redirect"
+  copy:
+    src: awall-rule.json
+    dest: /etc/awall/optional/acme-redirect.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.acme_redirect.enable is true
+
+- name: "[awall] Enable rule for: acme-redirect"
+  awall:
+    name: acme-redirect
+    state: enabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.acme_redirect.enable is true
+
+
+# restart and enable it 
+
+- name: "[OpenRC] Enable and restart service: acme-redirect"
+  service:
+    name: acme-redirect
+    enabled: yes
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.acme_redirect.enable is true
+
+- name: "[systemd] Enable and restart service: acme-redirect"
+  systemd:
+    name: acme-redirect
+    enabled: yes
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.acme_redirect.enable is true
+
+- command:
+    cmd: acme-redirect check -q
+  register: acme_check
+  become: yes
+  become_user: acme-redirect
+  when: 
+    - services.acme_redirect.enable is true
+
+- fail:
+    msg: "Check of Certs failed: {{acme_check.stdout}}"
+  when: 
+    - services.acme_redirect.enable is true
+    - acme_check.stdout | length > 0
+
+- command:
+    cmd: acme-redirect renew -q
+  register: acme_renew
+  become: yes
+  become_user: acme-redirect
+  when: 
+    - services.acme_redirect.enable is true
+
+- fail:
+    msg: "Renew of certs failed: {{acme_renew.stdout}}"
+  when: 
+    - services.acme_redirect.enable is true
+    - acme_renew.stdout | length > 0
+
+
+# stop it 
+
+- name: "[OpenRC] Disable and stop service: acme-redirect"
+  service:
+    name: acme-redirect
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.acme_redirect.enable is false
+
+- name: "[systemd] Disable and stop service: acme-redirect"
+  systemd:
+    name: acme-redirect
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.acme_redirect.enable is false
+
+
+#defirewall it
+
+- name: "[awall] Disable rule for: acme-redirect"
+  awall:
+    name: acme-redirect
+    state: disabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.acme_redirect.enable is false
+
+- name: "[awall] Delete rule for: acme-redirect"
+  file:
+    path: /etc/awall/optional/acme-redirect.json
+    state: absent 
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.acme_redirect.enable is false
+
+
+# remove it 
+
+- name: "[Alpine] Remove package: acme-redirect"
+  apk:
+    name: acme-redirect
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.acme_redirect.enable is false
+
+- name: "[Archlinux] Remove package: acme-redirect"
+  pacman:
+    name: acme-redirect
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.acme_redirect.enable is false
+
+
+# remove leftover files
+
+- name: "Remove directory: /etc/acme-redirect.d"
+  file:
+    path: /etc/acme-redirect.d
+    state: absent
+  when: 
+    - services.acme_redirect.enable is false
+
+- name: "Remove directory: /var/lib/acme-redirect"
+  file:
+    path: /var/lib/acme-redirect
+    state: absent
+  when: 
+    - services.acme_redirect.enable is false
+
+- name: "Remove file: /etc/acme-redirect.conf"
+  file:
+    path: /etc/acme-redirect.conf
+    state: absent
+  when: 
+    - services.acme_redirect.enable is false
+
diff --git a/roles/acme-redirect/templates/acme-redirect-general.conf.j2 b/roles/acme-redirect/templates/acme-redirect-general.conf.j2
@@ -0,0 +1,7 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+[acme]
+acme_email = "{{ services.acme_redirect.email }}"
+acme_url   = "{{ services.acme_redirect.acme_url }}"
diff --git a/roles/acme-redirect/templates/acme-redirect.conf.j2 b/roles/acme-redirect/templates/acme-redirect.conf.j2
@@ -0,0 +1,16 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+[cert]
+name      = "{{item.key}}"
+dns_names = [
+{% for domain in item.value.dns_names %}
+    "{{domain}}",
+{% endfor %}
+]
+exec = [
+{% for task in item.value.renew_tasks %}
+    "{{task}}",
+{% endfor %}
+]
diff --git a/roles/backup/tasks/main.yml b/roles/backup/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+
+- name: "[Alpine] Install package: restic"
+  apk:
+    name: restic
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine"
+
+- include: wanderduene.yml
+  when: system.hostname == "wanderduene"
diff --git a/roles/backup/tasks/wanderduene.yml b/roles/backup/tasks/wanderduene.yml
@@ -0,0 +1,54 @@
+---
+
+- name: create password file for rest-server
+  copy:
+    content: "{{ lookup('community.general.passwordstore', 'server/taurus/rest-server.plain returnall=true')}}"
+    dest:    /var/lib/restic-password
+    mode:    0755
+    owner:   root
+    group:   root
+
+- name: create password files for services
+  copy:
+    content: "{{ lookup('community.general.passwordstore', 'server/{{system.hostname}}/restic/{{item}} returnall=true')}}"
+    dest:    "/var/lib/{{item}}/restic-password"
+    owner:   "{{item}}"
+    group:   "{{item}}"
+    mode:    0700
+  loop:
+    - maddy
+    - radicale
+    - git
+    - pleroma
+    - synapse
+    - oeffisearch
+
+- name: create password file for postgresql
+  copy:
+    content: "{{ lookup('community.general.passwordstore', 'server/{{system.hostname}}/restic/postgresql returnall=true')}}"
+    dest:    /var/lib/postgresql/restic-password
+    owner:   postgres
+    group:   postgres
+    mode:    0700
+
+- name: create password file for htmldir
+  copy:
+    content: "{{ lookup('community.general.passwordstore', 'server/{{system.hostname}}/restic/websites returnall=true')}}"
+    dest:    /var/lib/websites/restic-password
+    owner:   leah
+    group:   leah
+    mode:    0700
+
+- name: Copy backup-script to server
+  copy:
+    src: scripts/restic-backup-{{system.hostname}}.sh
+    dest: /root/restic-backup.sh
+    mode: 0755
+
+- name: create crontab entry
+  cron:
+    name: "run restic-backups"
+    special_time: daily
+    user: root
+    job: "/root/restic-backup.sh > /dev/null"
+    state: present
diff --git a/roles/bind/files/awall-rule.json b/roles/bind/files/awall-rule.json
@@ -0,0 +1,13 @@
+{
+  "description": "Allow DNS on WAN (for bind)",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "dns",
+      "action": "accept"
+    }
+  ]
+}
diff --git a/roles/bind/tasks/main.yml b/roles/bind/tasks/main.yml
@@ -0,0 +1,214 @@
+---
+
+# install it
+
+- name: "[Alpine] Install package: bind"
+  apk:
+    name: bind
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.bind.enable is true
+
+- name: "[Archlinux] Install package: bind"
+  pacman:
+    name: bind
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.bind.enable is true
+
+
+- name: "Clone specified zone-repo to: /var/lib/named/zones"
+  git:
+    repo: '{{ services.bind.zonesRepo }}'
+    dest: /var/lib/named/zones
+  register: zonesClone
+  when:
+    - services.bind.enable is true
+    - services.bind.zonesRepo is defined
+
+- name: "Set correct permissions for: /var/lib/named"
+  file:
+    path: /var/lib/named
+    owner: named
+    group: named
+    state: directory
+    recurse: yes
+  register: setPermissions
+  when:
+    - services.bind.enable is true
+
+- name: "[Alpine] Generate named.conf"
+  template:
+    src: named.conf.j2
+    dest: /etc/bind/named.conf
+    owner:  named
+    group: named
+  register: namedConfig
+  when:
+    - ansible_distribution == "Alpine"  
+    - services.bind.enable is true
+
+- name: "[Archlinux] Generate named.conf"
+  template:
+    src: named.conf.j2
+    dest: /etc/named.conf
+    owner:  named
+    group: named
+  register: namedConfig
+  when:
+    - ansible_distribution == "Archlinux"  
+    - services.bind.enable is true
+
+
+# (re)start it
+
+- name: "[OpenRC] Enable and start service: named"
+  service:
+    name: named
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.bind.enable is true
+
+- name: "[systemd] Enable and start service: named"
+  systemd:
+    name: named
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.bind.enable is true
+
+- name: "[OpenRC] Restart service: named"
+  service:
+    name: named
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.bind.enable is true
+    - zonesClone.changed or setPermissions.changed or namedConfig.changed
+
+- name: "[systemd] Restart service: named"
+  systemd:
+    name: named
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.bind.enable is true
+    - zonesClone.changed or setPermissions.changed or namedConfig.changed
+
+
+#firewall it 
+
+- name: "[awall] Create rule for: bind"
+  copy:
+    src: awall-rule.json
+    dest: /etc/awall/optional/bind.json
+    validate: jq '.' %s
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.bind.enable is true
+
+- name: "[awall] Enable rule for: bind"
+  awall:
+    name: bind
+    state: enabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.bind.enable is true
+
+
+# stop it
+
+- name: "[OpenRC] Disable and stop service: named"
+  service:
+    name: named
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.bind.enable is false
+
+- name: "[systemd] Disable and stop service: named"
+  systemd:
+    name: named
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.bind.enable is false
+
+
+#defirewall it
+
+- name: "[awall] Disable rule for: bind"
+  awall:
+    name: bind
+    state: disabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.bind.enable is false
+
+- name: "[awall] Delete rule for: bind"
+  file:
+    path: /etc/awall/optional/bind.json
+    state: absent 
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.bind.enable is false
+
+
+# deinstall it
+
+- name: "[Alpine] Remove package: bind"
+  apk:
+    name: bind
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.bind.enable is false
+
+- name: "[Archlinux] Remove package: bind"
+  pacman:
+    name: bind
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.bind.enable is false
+
+
+# remove leftover files
+
+- name: "Remove directory: /etc/bind"
+  file:
+    path: /etc/bind
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.bind.enable is false
+
+- name: "Remove file: /etc/named.conf"
+  file:
+    path: /etc/named.conf
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.bind.enable is false
+
+- name: "Remove directory: /var/lib/named"
+  file:
+    path: /var/lib/named
+    state: absent
+  when: 
+    - services.bind.enable is false
diff --git a/roles/bind/templates/named.conf.j2 b/roles/bind/templates/named.conf.j2
@@ -0,0 +1,17 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+options { 
+	directory "/var/lib/named"; 
+}; 
+
+{% for domain in services.bind.serveDomains %}
+zone "{{ domain }}" in {
+	type master;
+	file "/var/lib/named/zones/{{ domain }}.zone";
+};
+
+{% endfor %}
+
+controls { };+
\ No newline at end of file
diff --git a/roles/cgit/tasks/main.yml b/roles/cgit/tasks/main.yml
@@ -0,0 +1,161 @@
+---
+
+# check 
+
+- fail: msg="This role currently only supports AlpineLinux!"
+  when:
+    - services.cgit.enable is true
+    - ansible_distribution != "Alpine" 
+
+- fail: msg="Option 'services.cgit.configFile' has to be set!"
+  when:
+    - services.cgit.enable is true
+    - services.cgit.configFile is not defined 
+
+
+# install it 
+
+- name: "[Alpine] Install Package(s): cgit and it's dependecys"
+  apk:
+    name: cgit git spawn-fcgi fcgiwrap py3-markdown py3-pygments
+    state: present
+    update_cache: yes
+  when:
+    - services.cgit.enable is true
+    - ansible_distribution == "Alpine"
+
+
+# configure it
+
+- name: "Create fcgi-service for: cgit"
+  file:
+    src: /etc/init.d/spawn-fcgi
+    dest: /etc/init.d/spawn-fcgi.cgit
+    state: link
+  when:
+    - services.cgit.enable is true
+    - ansible_distribution == "Alpine"
+
+- name: "Create config for cgit's fcgi-service"
+  copy: 
+    content: "FCGI_PORT=8001\nFCGI_PROGRAM=/usr/bin/fcgiwrap"
+    dest: /etc/conf.d/spawn-fcgi.cgit
+  when:
+    - services.cgit.enable is true
+
+- name: "[OpenRC] Enable and start service: spawn-fcgi.cgit"
+  service:
+    name: spawn-fcgi.cgit
+    enabled: yes
+    state: started
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.cgit.enable is true
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/cgit.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+  when:
+    - services.cgit.enable is true
+    - services.cgit.nginx.enable is defined
+    - services.cgit.nginx.enable is true
+
+- name: "Copy cgitrc to: /etc/cgitrc"
+  copy: 
+    src: "{{ services.cgit.configFile }}"
+    dest: /etc/cgitrc
+    mode: 0644
+  when:
+    - services.cgit.enable is true
+    - services.cgit.configFile is defined
+
+- name: "Copy custom css file to: /usr/share/webapps/cgit/custom-cgit.css"
+  copy: 
+    src: "{{ services.cgit.customCssFile }}"
+    dest: /usr/share/webapps/cgit/custom-cgit.css
+    mode: 0644
+  when:
+    - services.cgit.enable is true
+    - services.cgit.customCssFile is defined
+
+- name: Adding user nginx to group git
+  user:
+    name: nginx
+    groups: git
+    append: yes
+  when:
+    - services.cgit.enable is true
+    - services.cgit.nginx.enable is defined
+    - services.cgit.nginx.enable is true
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.cgit.enable is true
+    - services.cgit.nginx.enable is defined
+    - services.cgit.nginx.enable is true
+
+
+# remove it
+
+- name: "[OpenRC] Disable and stop service: spawn-fcgi.cgit"
+  service:
+    name: spawn-fcgi.cgit
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.cgit.enable is false
+
+- name: "[Alpine] Remove Package(s): cgit and it's dependecys"
+  apk:
+    name: cgit spawn-fcgi fcgiwrap py3-markdown py3-pygments
+    state: absent
+  when:
+    - services.cgit.enable is false
+    - ansible_distribution == "Alpine"
+
+
+# remove leftover files
+
+- name: "Remove file: /etc/cgitrc"
+  file:
+    path: /etc/cgitrc
+    state: absent
+  when:
+    - services.cgit.enable is false
+
+- name: "Remove directory: /usr/share/webapps/cgit"
+  file:
+    path: /usr/share/webapps/cgit
+    state: absent
+  when:
+    - services.cgit.enable is false
+
+- name: "Remove file: /etc/conf.d/spawn-fcgi.cgit"
+  file:
+    path: /etc/conf.d/spawn-fcgi.cgit
+    state: absent
+  when:
+    - services.cgit.enable is false
+
+- name: "Remove file: /etc/init.d/spawn-fcgi.cgit"
+  file:
+    path: /etc/init.d/spawn-fcgi.cgit
+    state: absent
+  when:
+    - services.cgit.enable is false
+
+- name: "Remove file: /etc/nginx/conf.d/cgit.conf"
+  file:
+    path: /etc/nginx/conf.d/cgit.conf
+    state: absent
+  when:
+    - services.cgit.enable is false
diff --git a/roles/cgit/templates/nginx-vhost.conf.j2 b/roles/cgit/templates/nginx-vhost.conf.j2
@@ -0,0 +1,43 @@
+{% if  services.cgit.nginx.sslOnly is not defined or services.cgit.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.cgit.nginx.domain }};
+
+	root /usr/share/webapps/cgit;
+	try_files $uri @cgit;
+
+	location @cgit {
+		include fastcgi_params;
+		fastcgi_pass localhost:8001;
+		fastcgi_param SCRIPT_FILENAME /usr/share/webapps/cgit/cgit.cgi;
+		fastcgi_param PATH_INFO $uri;
+		fastcgi_param QUERY_STRING $args;
+	}
+}
+
+{% endif %}
+{% if services.cgit.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.cgit.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.cgit.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.cgit.nginx.domain }};
+
+	root /usr/share/webapps/cgit;
+	try_files $uri @cgit;
+
+	location @cgit {
+		include fastcgi_params;
+		fastcgi_pass localhost:8001;
+		fastcgi_param SCRIPT_FILENAME /usr/share/webapps/cgit/cgit.cgi;
+		fastcgi_param PATH_INFO $uri;
+		fastcgi_param QUERY_STRING $args;
+	}
+}
+{% endif %}
diff --git a/roles/common/files/awall/custom-services.json b/roles/common/files/awall/custom-services.json
@@ -0,0 +1,7 @@
+{
+	"service": {
+		"frps": [
+			{ "proto": "tcp", "port": 5050 }
+		]
+	}
+}
diff --git a/alpine/config-files/awall/ssh.json b/roles/common/files/awall/ssh.json
diff --git a/alpine/config-files/ferm/ferm.initd b/roles/common/files/ferm.initd
diff --git a/roles/common/files/pacman.conf.patch b/roles/common/files/pacman.conf.patch
@@ -0,0 +1,21 @@
+--- /etc/pacman.conf	2020-07-01 03:52:38.000000000 +0200
++++ pacman.conf	2020-12-07 14:18:19.773580876 +0100
+@@ -1,4 +1,8 @@
+ #
++# !!! This file is managed by Ansible !!!
++#
++
++#
+ # /etc/pacman.conf
+ #
+ # See the pacman.conf(5) manpage for option and repository directives
+@@ -83,6 +83,10 @@
+ [community]
+ Include = /etc/pacman.d/mirrorlist
+
++[aurto]
++Server = https://repo.f2k1.de
++SigLevel = Optional TrustAll
++
+ # If you want to run 32 bit applications on your x86_64 system,
+ # enable the multilib repositories as required here.+
\ No newline at end of file
diff --git a/alpine/config-files/ssh/sshd_config.patch b/roles/common/files/sshd/alpine-sshd_config.patch
diff --git a/roles/common/files/sshd/archlinux-sshd_config.patch b/roles/common/files/sshd/archlinux-sshd_config.patch
@@ -0,0 +1,20 @@
+--- /etc/ssh/sshd_config	2020-12-09 09:49:18.056072245 +0100
++++ sshd_config	2020-12-09 11:20:20.876447415 +0100
+@@ -29,7 +33,7 @@
+ # Authentication:
+ 
+ #LoginGraceTime 2m
+-PermitRootLogin prohibit-password
++PermitRootLogin yes
+ #StrictModes yes
+ #MaxAuthTries 6
+ #MaxSessions 10
+@@ -54,7 +58,7 @@
+ #IgnoreRhosts yes
+ 
+ # To disable tunneled clear text passwords, change to no here!
+-PasswordAuthentication yes
++PasswordAuthentication no
+ #PermitEmptyPasswords no
+ 
+ # Change to no to disable s/key passwords
diff --git a/roles/common/files/sudoers.patch b/roles/common/files/sudoers.patch
@@ -0,0 +1,19 @@
+--- sudoers.orig
++++ sudoers.patch
+@@ -1,3 +1,7 @@
++#
++# !!! This file is managed by Ansible !!!
++#
++
+ ## sudoers file.
+ ##
+ ## This file MUST be edited with the 'visudo' command as root.
+@@ -79,7 +83,7 @@
+ root ALL=(ALL) ALL
+ 
+ ## Uncomment to allow members of group wheel to execute any command
+-# %wheel ALL=(ALL) ALL
++%wheel ALL=(ALL) ALL
+ 
+ ## Same thing without a password
+ # %wheel ALL=(ALL) NOPASSWD: ALL
diff --git a/roles/common/tasks/firewall-awall.yml b/roles/common/tasks/firewall-awall.yml
@@ -0,0 +1,86 @@
+---
+- name: "[Alpine] Install Package: awall"
+  apk:
+    name: awall lua-lyaml ip6tables
+    state: present
+    update_cache: yes
+  when: network.awall.enable is true
+
+- name: "Load kernel module: iptables"
+  raw: "modprobe ip_tables"
+  when: network.awall.enable is true
+
+- name: "[awall] Create custom-services file"
+  copy:
+    src: awall/custom-services.json
+    dest: /etc/awall/private/custom-services.json
+    validate: jq '.' %s
+  when: network.awall.enable is true
+
+- name: "[awall] Create base-config"
+  template:
+    src: awall-baseconfig.yaml.j2
+    dest: /etc/awall/private/base.yaml
+#    validate: jq '.' %s
+  when: network.awall.enable is true
+
+- name: "[awall] Copy rule for: ssh"
+  copy:
+    src: awall/ssh.json
+    dest: /etc/awall/optional/ssh.json
+    validate: jq '.' %s
+  when:
+    - network.awall.enable is true
+    - system.enableSSH is defined
+    - system.enableSSH is true 
+
+- name: "[awall] Activate rule for: ssh"
+  awall:
+    name: ssh
+    state: enabled
+    activate: yes
+  when: 
+    - network.awall.enable is true
+    - system.enableSSH is defined
+    - system.enableSSH is true
+
+- name: "[OpenRC] Enable and start service: iptables"
+  service:
+    name: iptables
+    enabled: yes
+    state: started
+  when: network.awall.enable is true
+
+- name: "[OpenRC] Enable and start service: ip6tables"
+  service:
+    name: ip6tables
+    enabled: yes
+    state: started
+  when: network.awall.enable is true
+
+
+- name: "[OpenRC] Disable and stop service: iptables"
+  service:
+    name: iptables
+    enabled: no
+    state: stopped
+  when: network.awall.enable is false
+
+- name: "[OpenRC] Disable and stop service: ip6tables"
+  service:
+    name: ip6tables
+    enabled: no
+    state: stopped
+  when: network.awall.enable is false
+
+- name: "[Alpine] Remove Package: awall"
+  apk:
+    name: awall lua-lyaml ip6tables
+    state: absent
+  when: network.awall.enable is false
+
+- name: "Delete directory: /etc/awall"
+  file:
+    path: /etc/awall
+    state: absent
+  when: network.awall.enable is false
diff --git a/roles/common/tasks/firewall-ferm.yml b/roles/common/tasks/firewall-ferm.yml
@@ -0,0 +1,81 @@
+---
+
+- name: "[Alpine] Install Package: ferm"
+  apk:
+    name: ferm
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine"
+    - network.ferm.enable is true 
+
+- name: "[Archlinux] Install Package: ferm"
+  pacman:
+    name: ferm
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux"
+    - network.ferm.enable is true
+
+- name: "[Alpine] Copy service file to destination"
+  copy:
+    src: ferm.initd
+    dest: /etc/init.d/ferm
+    mode: 0755
+  when: 
+    - ansible_distribution == "Alpine"
+    - network.ferm.enable is true
+
+- name: Fail when useFerm and no configFile
+  fail:
+    msg: Option 'network.ferm.configFile' not set!
+  when: 
+    - network.ferm.enable is true
+    - network.ferm.configFile is not defined
+
+- name: copy ferm config to destination
+  copy:
+    src: "{{ network.ferm.configFile }}"
+    dest: /etc/ferm.conf
+    mode: 0644
+  register: fermConfig
+  when: network.ferm.enable is true
+
+
+- name: "[OpenRC] Enable and start service: ferm"
+  service:
+    name: ferm
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - network.ferm.enable is true
+
+- name: "[systemd] Enable and start service: ferm"
+  systemd:
+   name: ferm
+   enabled: yes
+   state: started
+  when: 
+    - ansible_service_mgr == "systemd"
+    - network.ferm.enable is true 
+
+
+- name: "[OpenRC] Restart service: ferm (to deploy new config)"
+  service:
+    name: ferm
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - network.ferm.enable is true
+    - fermConfig.changed
+
+- name: "[systemd] Restart service: ferm (to deploy new config)"
+  systemd:
+    name: ferm
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - network.ferm.enable is true
+    - fermConfig.changed
diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+
+# This always failed because `or` makes the condition a tautology
+# (a host is always "not Alpine" or "not Archlinux"); it needs `and`:
+# - name: Fail when distribution is unsupported
+#   fail:
+#     msg: This role only works on AlpineLinux and ArchLinux
+#   when: (ansible_distribution != "Alpine") and (ansible_distribution != "Archlinux")
+
+- include: packages.yml
+
+- include: timezone.yml
+  when: system.timezone is defined
+
+- include: network_general.yml
+
+- include: network_alpine.yml
+  when: ansible_distribution == "Alpine"
+
+# - include: network_archlinux.yml
+#   when: ansible_distribution == "Archlinux"
+
+- include: network_ip-forwarding.yml
+  when: network.ipForwarding is defined 
+
+- include: firewall-awall.yml
+  when: 
+    - ansible_distribution == "Alpine"
+    - network.awall.enable is defined  
+
+- include: firewall-ferm.yml
+  when: network.ferm.enable is defined 
+
+- include: ntp.yml
+  when: system.useNTP is defined
+
+- include: sudo.yml
+  when: system.enableSudo is defined
+
+- include: sshd.yml
+  when: system.enableSSH is defined
+
+- include: users.yml
+  when: system.users is defined
+
+- include: node-exporter.yml
+  when: services.prometheus_node_exporter is defined
diff --git a/roles/common/tasks/network_alpine.yml b/roles/common/tasks/network_alpine.yml
@@ -0,0 +1,49 @@
+---
+
+- name: "[Alpine] create file: /etc/network/interfaces"
+  template:
+    src: interfaces.conf.j2
+    dest: /etc/network/interfaces
+    mode: 0755
+  when: network.interfaces is defined
+
+- name: "[Alpine] Install package: vlan"
+  apk:
+    name: vlan
+    state: present
+    update_cache: yes
+  when: 
+    - network.vlanSupport is defined 
+    - network.vlanSupport is true
+
+- name: "[Alpine] Install package: bridge"
+  apk:
+    name: bridge
+    state: present
+    update_cache: yes
+  when:
+    - network.bridgeSupport is defined 
+    - network.bridgeSupport is true
+
+- name: "[Alpine] Remove package: vlan"
+  apk:
+    name: vlan
+    state: absent
+    update_cache: yes
+  when: 
+    - network.vlanSupport is defined
+    - network.vlanSupport is false  
+
+- name: "[Alpine] Remove package: bridge"
+  apk:
+    name: bridge
+    state: absent
+    update_cache: yes
+  when: 
+    - network.bridgeSupport is defined 
+    - network.bridgeSupport is false  
+
+- name: "[Alpine/OpenRC] Restart service: networking"
+  service:
+    name: networking
+    state: restarted
diff --git a/roles/common/tasks/network_general.yml b/roles/common/tasks/network_general.yml
@@ -0,0 +1,29 @@
+---
+
+- name: "create file: /etc/hostname"
+  copy:
+    content: "{{system.hostname}}"
+    dest: /etc/hostname
+  register: hostnameFile
+  when: system.hostname is defined 
+
+- name: Change hostname of running system
+  hostname:
+    name: "{{system.hostname}}"
+  when: 
+    - system.hostname is defined
+    - hostnameFile.changed
+
+- name: "create file: /etc/hosts"
+  template:
+    src: hosts.conf.j2
+    dest: /etc/hosts
+    mode: 0755
+  when: system.hostname is defined 
+
+- name: "create file: /etc/resolv.conf"
+  template:
+    src: resolv.conf.j2
+    dest: /etc/resolv.conf
+    mode: 0755
+  when: system.nameservers is defined 
diff --git a/roles/common/tasks/network_ip-forwarding.yml b/roles/common/tasks/network_ip-forwarding.yml
@@ -0,0 +1,41 @@
+---
+
+- name: "[Sysctl] Enable IPv4 forwarding"
+  ansible.posix.sysctl:
+    name: net.ipv4.ip_forward
+    value: '1'
+    sysctl_file: /etc/sysctl.d/10-ip_forward.conf
+    sysctl_set: yes
+    state: present
+    reload: yes
+  when: network.ipForwarding is true
+
+- name: "[Sysctl] Enable IPv6 forwarding"
+  ansible.posix.sysctl:
+    name: net.ipv6.conf.all.forwarding
+    value: '1'
+    sysctl_file: /etc/sysctl.d/10-ip_forward.conf
+    sysctl_set: yes
+    state: present
+    reload: yes
+  when: network.ipForwarding is true
+
+- name: "[Sysctl] Disable IPv4 forwarding"
+  ansible.posix.sysctl:
+    name: net.ipv4.ip_forward
+    value: '0'
+    sysctl_file: /etc/sysctl.d/10-ip_forward.conf
+    sysctl_set: yes
+    state: present
+    reload: yes
+  when: network.ipForwarding is false
+
+- name: "[Sysctl] Disable IPv6 forwarding"
+  ansible.posix.sysctl:
+    name: net.ipv6.conf.all.forwarding
+    value: '0'
+    sysctl_file: /etc/sysctl.d/10-ip_forward.conf
+    sysctl_set: yes
+    state: present
+    reload: yes
+  when: network.ipForwarding is false+
\ No newline at end of file
diff --git a/roles/common/tasks/node-exporter.yml b/roles/common/tasks/node-exporter.yml
@@ -0,0 +1,86 @@
+---
+
+- name: "[Alpine] Fail when not on edge"
+  fail:
+    msg: Prometheus Node-Exporter is currently only supported on Alpine's edge repos!
+  when: 
+    - ansible_distribution == "Alpine" 
+    - system.alpineVersion != "edge"
+    - services.prometheus_node_exporter.enable is true
+
+- name: "[Alpine] Install package: node-exporter"
+  apk:
+    name: prometheus-node-exporter
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.alpineVersion == "edge"
+    - services.prometheus_node_exporter.enable is true 
+
+- name: "[Archlinux] Install package: prometheus-node-exporter"
+  pacman:
+    name: prometheus-node-exporter
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux"
+    - services.prometheus_node_exporter.enable is true 
+
+
+- name: "[OpenRC] Enable and start service: node-exporter"
+  service:
+    name: node-exporter
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc" 
+    - system.alpineVersion == "edge"
+    - services.prometheus_node_exporter.enable is true
+
+- name: "[systemd] Enable and start service: prometheus-node-exporter"
+  systemd:
+    name: prometheus-node-exporter
+    state: started
+    enabled: yes
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.prometheus_node_exporter.enable is true 
+
+
+- name: "[OpenRC] Disable and stop service: node-exporter"
+  service:
+    name: node-exporter
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc" 
+    - system.alpineVersion == "edge"
+    - services.prometheus_node_exporter.enable is false
+
+- name: "[systemd] Disable and stop service: prometheus-node-exporter"
+  systemd:
+    name: prometheus-node-exporter
+    state: stopped
+    enabled: no
+  when: 
+    - services.prometheus_node_exporter.enable is false 
+    - ansible_service_mgr == "systemd"
+
+
+- name: "[Alpine] Remove package: node-exporter"
+  apk:
+    name: prometheus-node-exporter
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.alpineVersion == "edge"
+    - services.prometheus_node_exporter.enable is false 
+
+- name: "[Archlinux] Remove package: prometheus-node-exporter"
+  pacman:
+    name: prometheus-node-exporter
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux"
+    - services.prometheus_node_exporter.enable is false 
diff --git a/roles/common/tasks/ntp.yml b/roles/common/tasks/ntp.yml
@@ -0,0 +1,39 @@
+---
+
+- name: "[Alpine] Install package: chrony" 
+  apk:
+    name: chrony 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.useNTP is true
+
+- name: "[OpenRC] Enable and start service: chrony"
+  service:
+    name: chronyd
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - system.useNTP is true
+  ignore_errors: true
+
+
+- name: "[OpenRC] Disable and stop service: chrony"
+  service:
+    name: chronyd
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+    - system.useNTP is false
+  ignore_errors: true
+
+- name: "[Alpine] Remove package: chrony" 
+  apk:
+    name: chrony 
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.useNTP is false
diff --git a/roles/common/tasks/packages.yml b/roles/common/tasks/packages.yml
@@ -0,0 +1,69 @@
+---
+
+- name: "[Alpine] Fail when Option 'system.alpineVersion' not set"
+  fail:
+    msg: The option 'system.alpineVersion' has to be set!
+  when:
+    - ansible_distribution == "Alpine"
+    - system.alpineVersion is not defined
+
+- name: "[Alpine] Get signature from personal repo"
+  get_url:
+    url: http://home.f2k1.de:8080/leah-5f817de5.rsa.pub
+    dest: /etc/apk/keys/leah-5f817de5.rsa.pub
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableOwnRepos is defined
+    - system.enableOwnRepos is true
+
+- name: "[Alpine] Update file: /etc/apk/repositories"
+  template:
+    src: repositories.j2
+    dest: /etc/apk/repositories
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableOwnRepos is defined 
+    - system.enableOwnRepos is true
+
+
+- name: "[Archlinux] Install package: patch"
+  pacman:
+    name: patch
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux"
+    - system.enableOwnRepos is defined
+    - system.enableOwnRepos is true
+
+- name: "[Archlinux] Patch file: /etc/pacman.conf (add aurto AUR repo)"
+  ansible.posix.patch:
+    src: pacman.conf.patch
+    dest: /etc/pacman.conf
+  when:
+    - ansible_distribution == "Archlinux"
+    - system.enableOwnRepos is defined
+    - system.enableOwnRepos is true
+
+
+- name: "[Alpine] Update system"
+  raw: "apk update && apk upgrade"
+  when: ansible_distribution == "Alpine"
+
+- name: "[Archlinux] Update system"
+  pacman:
+    update_cache: yes
+    upgrade: yes
+  when: ansible_distribution == "Archlinux"
+
+
+- name: "[Alpine] Install common packages"
+  apk:
+    name: "{{ packages }}"
+    update_cache: yes
+  when: ansible_distribution == "Alpine"
+
+- name: "[Archlinux] Install common packages"
+  pacman:
+    name: "{{ packages }}"
+    update_cache: yes
+  when: ansible_distribution == "Archlinux"+
\ No newline at end of file
diff --git a/roles/common/tasks/sshd.yml b/roles/common/tasks/sshd.yml
@@ -0,0 +1,141 @@
+---
+
+- name: "[Alpine] Install package: openssh" 
+  apk:
+    name: openssh 
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableSSH is true
+
+- name: "[Archlinux] Install package: openssh" 
+  pacman:
+    name: openssh 
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux"
+    - system.enableSSH is true
+
+
+# need to add message that this file is managed by ansible 
+- name: "[Alpine] Patch file: /etc/ssh/sshd_config"
+  patch:
+    src: sshd/alpine-sshd_config.patch
+    dest: /etc/ssh/sshd_config
+  register: sshdConfig
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableSSH is true
+
+# need to add message that this file is managed by ansible 
+- name: "[Archlinux] Patch file: /etc/ssh/sshd_config"
+  patch:
+    src: sshd/archlinux-sshd_config.patch
+    dest: /etc/ssh/sshd_config
+  register: sshdConfig
+  when:
+    - ansible_distribution == "Archlinux"
+    - system.enableSSH is true
+
+
+- name: "[awall] Copy rule for: ssh"
+  copy:
+    src: awall/ssh.json
+    dest: /etc/awall/optional/ssh.json
+    validate: jq '.' %s
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableSSH is true
+    - network.awall.enable is true
+
+- name: "[awall] Activate rule for: ssh"
+  awall:
+    name: ssh
+    state: enabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableSSH is true
+    - network.awall.enable is true
+
+- name: "[OpenRC] Enable and start service: sshd"
+  service:
+    name: sshd
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - system.enableSSH is true
+
+- name: "[systemd] Enable and start service: sshd"
+  systemd:
+    name: sshd
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "systemd"
+    - system.enableSSH is true
+
+
+- name: "[OpenRC] Restart service: sshd (to deploy new config)"
+  service:
+    name: sshd
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc" 
+    - system.enableSSH is true
+    - sshdConfig.changed
+
+- name: "[systemd] Restart service: sshd (to deploy new config)"
+  systemd:
+    name: sshd
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - system.enableSSH is true
+    - sshdConfig.changed
+
+
+
+- name: "[OpenRC] Disable and stop service: sshd"
+  service:
+    name: sshd
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+    - system.enableSSH is false
+
+- name: "[systemd] Disable and stop service: sshd"
+  systemd:
+    name: sshd
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+    - system.enableSSH is false
+
+
+- name: "[Alpine] Remove package: openssh" 
+  apk:
+    name: openssh 
+    state: absent
+  when:
+    - ansible_distribution == "Alpine"
+    - system.enableSSH is false
+
+- name: "[Archlinux] Remove package: openssh" 
+  pacman:
+    name: openssh 
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux"
+    - system.enableSSH is false
+
+- name: "Delete directory: /etc/ssh"
+  file:
+    path: /etc/ssh
+    state: absent
+  when: system.enableSSH is false
diff --git a/roles/common/tasks/sudo.yml b/roles/common/tasks/sudo.yml
@@ -0,0 +1,49 @@
+---
+
+- name: "[Alpine] Install package: sudo" 
+  apk:
+    name: sudo 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.enableSudo is true
+
+- name: "[Archlinux] Install package: sudo" 
+  pacman:
+    name: sudo 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux"
+    - system.enableSudo is true
+
+
+- name: "Patch file: /etc/sudoers"
+  patch:
+    src: sudoers.patch
+    dest: /etc/sudoers
+  when: system.enableSudo is true
+
+
+- name: "[Alpine] Remove package: sudo" 
+  apk:
+    name: sudo 
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine"
+    - system.enableSudo is false
+
+- name: "[Archlinux] Remove package: sudo" 
+  pacman:
+    name: sudo 
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux"
+    - system.enableSudo is false
+
+- name: "Remove file: /etc/sudoers"
+  file:
+    path: /etc/sudoers
+    state: absent
+  when: system.enableSudo is false
diff --git a/roles/common/tasks/timezone.yml b/roles/common/tasks/timezone.yml
@@ -0,0 +1,29 @@
+---
+
+- name: "[Alpine] Install package: tzdata" 
+  apk:
+    name: tzdata 
+    state: present
+    update_cache: yes
+  when: ansible_distribution == "Alpine"
+
+- name: Check that timezone exists
+  stat:
+    path: "/usr/share/zoneinfo/{{ system.timezone }}"
+  register: timezoneCheck
+
+- fail:
+    msg: "Invalid timezone: {{ system.timezone }}"
+  when: not timezoneCheck.stat.exists
+
+- name: "[Archlinux] Set timezone to: {{ system.timezone }}"
+  community.general.timezone:
+    name: "{{ system.timezone }}"
+  when: ansible_distribution == "Archlinux"
+
+- name: "[Alpine] Set timezone to: {{system.timezone}}"
+  file:
+    src: "/usr/share/zoneinfo/{{ system.timezone }}"
+    path: /etc/localtime
+    state: link
+  when: ansible_distribution == "Alpine"
diff --git a/roles/common/tasks/users.yml b/roles/common/tasks/users.yml
@@ -0,0 +1,34 @@
+---
+
+- name: "Add groups" 
+  group:
+    name: "{{item.name}}"
+    state: present
+  loop: "{{ system.users }}"
+
+- name: "Add users" 
+  user:
+    append: yes
+    name: "{{item.name}}"
+    group: "{{item.name}}"
+    groups: "{{item.groups}}"
+    password: "{{item.password}}"
+  loop: "{{ system.users }}"
+
+- name: "Create ~/.ssh directory for users"
+  file:
+    state: directory
+    dest: "/home/{{item.name}}/.ssh/"
+    mode: 0755
+    owner: "{{item.name}}"
+    group: "{{item.name}}"
+  loop: "{{ system.users }}"
+
+- name: "Place ssh-key for users"
+  copy:
+    content: "{{item.sshKey}}"
+    dest: "/home/{{item.name}}/.ssh/authorized_keys"
+    mode: 0644    
+    owner: "{{item.name}}"
+    group: "{{item.name}}"
+  loop: "{{ system.users }}"
diff --git a/roles/common/templates/awall-baseconfig.yaml.j2 b/roles/common/templates/awall-baseconfig.yaml.j2
@@ -0,0 +1,27 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+description: Base zones and policies for {{system.hostname}}
+import:
+  - custom-services
+
+{% if network.awall.config.zones is defined %}
+zone:
+  {{ network.awall.config.zones | to_yaml | trim | indent(2) }}
+{% endif %}
+
+{% if network.awall.config.policies is defined %}
+policy:
+  {{ network.awall.config.policies | to_yaml| trim | indent(2) }}
+{% endif %}
+
+{% if network.awall.config.filters is defined %}
+filter:
+  {{ network.awall.config.filters | to_yaml | trim | indent(2) }}
+{% endif %}
+
+{% if network.awall.config.snat is defined %}
+snat:
+  - out: {{ network.awall.config.snat }}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/common/templates/hosts.conf.j2 b/roles/common/templates/hosts.conf.j2
@@ -0,0 +1,11 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if system.hostname == "lollo" %}
+195.39.246.32		localhost localhost.localdomain {{ system.hostname }} {{ system.hostname }}.{{ system.domain }}
+2a0f:4ac0:acab::1		localhost localhost.localdomain {{ system.hostname }} {{ system.hostname }}.{{ system.domain }}
+{% else %}
+127.0.0.1	localhost localhost.localdomain {{ system.hostname }} {{ system.hostname }}.{{ system.domain }}
+::1			localhost localhost.localdomain {{ system.hostname }} {{ system.hostname }}.{{ system.domain }}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/common/templates/interfaces.conf.j2 b/roles/common/templates/interfaces.conf.j2
@@ -0,0 +1,62 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% for interface in network.interfaces %}
+auto {{ interface.name }}
+{% if interface.loopback is defined %}
+iface {{ interface.name }} inet loopback
+{% elif interface.manual is defined %}
+iface {{ interface.name }} inet manual
+{% else %}
+{% if interface.ipv4.dhcp is defined %}
+iface {{ interface.name }} inet dhcp
+{% else %}
+iface {{ interface.name }} inet static
+{% if system.hostname is defined %}
+	hostname {{ system.hostname }}
+{% endif %}
+{% if interface.bridge_ports is defined %}
+	bridge-ports {{interface.bridge_ports}}
+{% endif %}
+{% if interface.bridge_stp is defined %}
+{% if interface.bridge_stp is true %}
+	bridge-stp 1
+{% else %}
+	bridge-stp 0
+{% endif %}
+{% endif %}
+{% if interface.ipv4.address is defined %}
+	address {{ interface.ipv4.address }}
+{% endif %}
+{% if interface.ipv4.netmask is defined %}
+	netmask {{ interface.ipv4.netmask }}
+{% endif %}
+{% if interface.ipv4.gateway is defined %}
+	gateway {{ interface.ipv4.gateway }}
+{% endif %}
+
+{% if interface.ipv6 is defined %}
+{% if interface.ipv6.stateless is defined %}
+iface {{ interface.name }} inet6 manual
+	pre-up echo 1 > /proc/sys/net/ipv6/conf/{{ interface.name }}/accept_ra
+{% else %}
+iface {{ interface.name }} inet6 static
+{% if system.hostname is defined %}
+	hostname {{ system.hostname }}
+{% endif %}
+{% if interface.ipv6.address is defined %}
+	address {{ interface.ipv6.address }}
+{% endif %}
+{% if interface.ipv6.netmask is defined %}
+	netmask {{ interface.ipv6.netmask }}
+{% endif %}
+{% if interface.ipv6.gateway is defined %}
+	gateway {{ interface.ipv6.gateway }}
+{% endif %}
+{% endif %}
+{% endif %}
+{% endif %}
+{% endif %}
+
+{% endfor %}+
\ No newline at end of file
diff --git a/roles/common/templates/repositories.j2 b/roles/common/templates/repositories.j2
@@ -0,0 +1,10 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+http://home.f2k1.de:8080/alpine-pkgs
+https://dl-cdn.alpinelinux.org/alpine/{{ system.alpineVersion }}/main
+https://dl-cdn.alpinelinux.org/alpine/{{ system.alpineVersion }}/community
+{% if system.alpineVersion == "edge" %}
+https://dl-cdn.alpinelinux.org/alpine/{{ system.alpineVersion }}/testing
+{% endif %}+
\ No newline at end of file
diff --git a/roles/common/templates/resolv.conf.j2 b/roles/common/templates/resolv.conf.j2
@@ -0,0 +1,10 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if system.domain is defined %}
+search {{ system.domain }}
+{% endif %}
+{% for nameserver in system.nameservers %}
+nameserver {{ nameserver }}
+{% endfor %}
diff --git a/roles/common/vars/main.yml b/roles/common/vars/main.yml
@@ -0,0 +1,14 @@
+---
+
+packages:
+  - nano
+  - sudo
+  - htop
+  - tar
+  - unzip
+  - curl 
+  - wget
+  - tmux
+  - git
+  - patch
+  - jq
diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml
@@ -0,0 +1,117 @@
+---
+
+# install it 
+- name: "[Alpine] Install package: dnsmasq" 
+  apk:
+    name: dnsmasq 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.dnsmasq.enable is true
+
+- name: "[Archlinux] Install package: dnsmasq" 
+  pacman:
+    name: dnsmasq 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.dnsmasq.enable is true
+
+
+# configure it 
+
+- name: "Create file: /etc/dnsmasq.conf"
+  template:
+    src: dnsmasq.conf.j2
+    dest: /etc/dnsmasq.conf
+  register: createConfig
+  when: 
+    - services.dnsmasq.enable is true
+
+
+# (re)start it
+
+- name: "[OpenRC] Enable and start service: dnsmasq"
+  service:
+    name: dnsmasq
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.dnsmasq.enable is true
+
+- name: "[systemd] Enable and start service: dnsmasq"
+  systemd:
+    name: dnsmasq
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.dnsmasq.enable is true
+
+- name: "[OpenRC] Restart service: dnsmasq"
+  service:
+    name: dnsmasq
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.dnsmasq.enable is true
+    - createConfig.changed
+
+- name: "[systemd] Restart service: dnsmasq"
+  systemd:
+    name: dnsmasq
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.dnsmasq.enable is true
+    - createConfig.changed
+
+
+# stop it 
+
+- name: "[OpenRC] Disable and stop service: dnsmasq"
+  service:
+    name: dnsmasq
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.dnsmasq.enable is false
+
+- name: "[systemd] Disable and stop service: dnsmasq"
+  systemd:
+    name: dnsmasq
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.dnsmasq.enable is false
+
+
+# remove it
+
+- name: "[Alpine] Remove package: dnsmasq" 
+  apk:
+    name: dnsmasq 
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.dnsmasq.enable is false
+
+- name: "[Archlinux] Remove package: dnsmasq" 
+  pacman:
+    name: dnsmasq 
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.dnsmasq.enable is false
+
+- name: "Remove file: /etc/dnsmasq.conf"
+  file:
+    path: /etc/dnsmasq.conf
+    state: absent
+  when: 
+    - services.dnsmasq.enable is false
diff --git a/roles/dnsmasq/templates/dnsmasq.conf.j2 b/roles/dnsmasq/templates/dnsmasq.conf.j2
@@ -0,0 +1,105 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.dnsmasq.local_service is defined and services.dnsmasq.local_service is true  %}
+local-service
+{% endif %}
+
+{% if services.dnsmasq.no_resolv is defined and services.dnsmasq.no_resolv is true  %}
+no-resolv
+{% endif %}
+
+{% if services.dnsmasq.domain_needed is defined and services.dnsmasq.domain_needed is true  %}
+domain-needed
+{% endif %}
+
+{% if services.dnsmasq.bogus_priv is defined and services.dnsmasq.bogus_priv is true  %}
+bogus-priv
+{% endif %}
+
+{% if services.dnsmasq.expand_hosts is defined and services.dnsmasq.expand_hosts is true  %}
+expand-hosts
+{% endif %}
+
+{% if services.dnsmasq.addn_hosts is defined %}
+addn-hosts={{services.dnsmasq.addn_hosts}}
+{% endif %}
+
+{% if services.dnsmasq.read_ethers is defined and services.dnsmasq.read_ethers is true  %}
+read-ethers
+{% endif %}
+
+{% if services.dnsmasq.enable_ra is defined and services.dnsmasq.enable_ra is true  %}
+enable-ra
+{% endif %}
+
+{% if services.dnsmasq.quiet_ra is defined and services.dnsmasq.quiet_ra is true  %}
+quiet-ra
+{% endif %}
+
+{% for dns_server in services.dnsmasq.dns_servers %}
+server={{ dns_server }}
+{% endfor %}
+
+{% if services.dnsmasq.domain is defined %}
+# allow /etc/hosts and dhcp lookups for local domains
+local=/{{ services.dnsmasq.domain }}/
+domain={{ services.dnsmasq.domain }}
+{% endif %}
+
+{% if services.dnsmasq.auth_zone is defined %}
+{% for auth_zone in services.dnsmasq.auth_zone %}
+auth-zone={{ auth_zone }}
+{% endfor %}
+{% endif %}
+
+{% if services.dnsmasq.local_addresses is defined %}
+{% for address in services.dnsmasq.local_addresses %}
+address={{ address }}
+{% endfor %}
+{% endif %}
+
+{% if services.dnsmasq.addresses is defined %}
+{% for address in services.dnsmasq.addresses %}
+host-record={{ address }}
+{% endfor %}
+{% endif %}
+
+
+{% if services.dnsmasq.dhcp is defined %}
+{% if services.dnsmasq.dhcp.authoritative is defined and services.dnsmasq.dhcp.authoritative is true  %}
+dhcp-authoritative
+{% endif %}
+
+{% if services.dnsmasq.dhcp.rapid_commit is defined and services.dnsmasq.dhcp.rapid_commit is true  %}
+dhcp-rapid-commit
+{% endif %}
+
+{% if services.dnsmasq.dhcp.sequential_ip is defined and services.dnsmasq.dhcp.sequential_ip is true  %}
+dhcp-sequential-ip
+{% endif %}
+
+{% if services.dnsmasq.dhcp.ranges is defined %}
+{% for dhcp_range in services.dnsmasq.dhcp.ranges %}
+dhcp-range={{ dhcp_range }}
+{% endfor %}
+{% endif %}
+
+{% if services.dnsmasq.dhcp.options is defined %}
+{% for dhcp_option in services.dnsmasq.dhcp.options %}
+dhcp-option={{ dhcp_option}}
+{% endfor %}
+{% endif %}
+
+{% if services.dnsmasq.dhcp.hosts is defined %}
+{% for dhcp_host in services.dnsmasq.dhcp.hosts %}
+dhcp-host={{ dhcp_host }}
+{% endfor %}
+{% endif %}
+
+{% endif %}
+
+{% if services.dnsmasq.extraConfig is defined %}
+{{ services.dnsmasq.extraConfig }}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/frp/tasks/frpc.yml b/roles/frp/tasks/frpc.yml
@@ -0,0 +1,64 @@
+---
+
+- name: "[Alpine] Install package: frp"
+  apk:
+    name: frp
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+
+# NOTE(review): this is frpc.yml (client) but installs package "frps" on Arch —
+# the Arch/AUR package shipping both frps and frpc binaries is typically named
+# "frp"; confirm the actual repo package name before relying on this task.
+- name: "[Archlinux] Install package: frps"
+  pacman:
+    name: frps
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux"
+
+- name: "Create directory: /etc/frp"
+  file:
+    path: /etc/frp
+    state: directory
+  when: 
+    - ansible_distribution == "Archlinux"
+
+- name: "[Alpine] Create service config: /etc/conf.d/frpc"
+  copy:
+    # Double quotes so \n is a real newline (single-quoted YAML keeps it as
+    # literal backslash-n); the init script reads frpc_opts, not frps_opts.
+    content: "# Configuration for /etc/init.d/frpc\nfrpc_opts=\"-c /etc/frpc.ini\"\n"
+    dest: /etc/conf.d/frpc
+    mode: "0644"
+  when:
+    - ansible_distribution == "Alpine"
+
+- name: "[Alpine] Generate config for frpc"
+  template:
+    src: frpc.conf.j2
+    dest: /etc/frpc.ini
+    owner: frpc
+    group: frpc
+    mode: 0644
+  when: 
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Generate config for frpc"
+  template:
+    src: frpc.conf.j2
+    dest: /etc/frp/frpc.ini
+  when: 
+    - ansible_distribution == "Archlinux" 
+
+- name: "[OpenRC] Restart and enable service: frpc"
+  service:
+    name: frpc
+    state: restarted
+    enabled: yes
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart and enable service: frpc"
+  systemd:
+    name: frpc
+    state: restarted
+    enabled: yes
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/frp/tasks/frpc_checks.yml b/roles/frp/tasks/frpc_checks.yml
@@ -0,0 +1,37 @@
+---
+
+# fail when required options are not set
+- fail: msg="Option 'services.frpc.serverAddress' has to be set!"
+  when:
+    - services.frpc.serverAddress is not defined
+
+- fail: msg="Option 'services.frpc.serverPort' has to be set!"
+  when:
+    - services.frpc.serverPort is not defined
+
+- fail: msg="Option 'services.frpc.token' has to be set!"
+  when:
+    - services.frpc.token is not defined
+
+- fail: msg="Option 'services.frpc.tunnels' has to be set!"
+  when:
+    - services.frpc.tunnels is not defined
+
+
+- fail: msg="Option 'services.frpc.dashboard.port' has to be set when 'services.frpc.dashboard.enable' is true!"
+  when:
+    - services.frpc.dashboard.enable is defined
+    - services.frpc.dashboard.enable is true
+    - services.frpc.dashboard.port is not defined
+
+- fail: msg="Option 'services.frpc.dashboard.user' has to be set when 'services.frpc.dashboard.enable' is true!"
+  when:
+    - services.frpc.dashboard.enable is defined
+    - services.frpc.dashboard.enable is true
+    - services.frpc.dashboard.user is not defined
+
+- fail: msg="Option 'services.frpc.dashboard.passwd' has to be set when 'services.frpc.dashboard.enable' is true!"
+  when:
+    - services.frpc.dashboard.enable is defined
+    - services.frpc.dashboard.enable is true
+    - services.frpc.dashboard.passwd is not defined+
\ No newline at end of file
diff --git a/roles/frp/tasks/frps.yml b/roles/frp/tasks/frps.yml
@@ -0,0 +1,75 @@
+---
+
+- name: "[Alpine] Install package: frp"
+  apk:
+    name: frp
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: frps"
+  pacman:
+    name: frps
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux"
+
+- name: "Create directory: /etc/frp"
+  file:
+    path: /etc/frp
+    state: directory
+  when: 
+    - ansible_distribution == "Archlinux"
+
+- name: "[awall] Create rule for: frps"
+  template:
+    src: awall-rule.json.j2
+    dest: /etc/awall/optional/frps.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[awall] Enable rule for: frps"
+  awall:
+    name: frps
+    state: enabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[OpenRC] Generate frps service config"
+  template:
+    src: frps.confd.j2
+    dest: /etc/conf.d/frps
+    mode: 0644
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[Archlinux] Generate frps config"
+  template:
+    src: frps.ini.j2
+    dest: /etc/frp/frps.ini
+    mode: 0644
+  when:
+    - ansible_distribution == "Archlinux"
+
+
+- name: "[OpenRC] Enable and restart service: frps"
+  service:
+    name: frps
+    state: restarted
+    enabled: yes
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Enable and restart service: frps"
+  systemd:
+    name: frps
+    state: restarted
+    enabled: yes
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/frp/tasks/frps_checks.yml b/roles/frp/tasks/frps_checks.yml
@@ -0,0 +1,48 @@
+---
+
+- fail: msg="Option 'services.frps.token' has to be set!"
+  when:
+    - services.frps.token is not defined
+
+- fail: msg="Option 'services.frps.port' has to be set!"
+  when:
+    - services.frps.port is not defined
+
+- fail: msg="Option 'services.frps.vhostDomain' has to be set!"
+  when:
+    - services.frps.vhostDomain is not defined
+
+- fail: msg="Option 'services.frps.vhostPort' has to be set!"
+  when:
+    - services.frps.vhostPort is not defined
+
+
+
+
+- fail: msg="Option 'services.frps.nginx.vhosts' has to be set when using nginx!"
+  when:
+    - services.frps.nginx.enable is defined
+    - services.frps.nginx.enable is true
+    - services.frps.nginx.vhosts is not defined
+
+- fail: msg="Option 'services.frps.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.frps.nginx.enable is defined
+    - services.frps.nginx.enable is true
+    - services.frps.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.frps.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.frps.nginx.enable is defined
+    - services.frps.nginx.enable is true
+    - services.frps.nginx.ssl.enable is defined
+    - services.frps.nginx.ssl.enable is true
+    - services.frps.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.frps.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.frps.nginx.enable is defined
+    - services.frps.nginx.enable is true
+    - services.frps.nginx.ssl.enable is defined
+    - services.frps.nginx.ssl.enable is true
+    - services.frps.nginx.ssl.privkey is not defined
diff --git a/roles/frp/tasks/frps_nginx.yml b/roles/frp/tasks/frps_nginx.yml
@@ -0,0 +1,23 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/frps.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"+
\ No newline at end of file
diff --git a/roles/frp/tasks/main.yml b/roles/frp/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+
+- fail: msg="This Role only works when Option 'system.enableOwnRepos' is true!"
+  when:
+    - (services.frps.enable is defined and services.frps.enable is true) or (services.frpc.enable is defined and services.frpc.enable is true)
+    - system.enableOwnRepos is false
+
+- include: frps_checks.yml
+  when:
+    - services.frps.enable is defined
+    - services.frps.enable is true
+
+- include: frpc_checks.yml
+  when:
+    - services.frpc.enable is defined
+    - services.frpc.enable is true
+
+- include: frps.yml
+  when:
+    - services.frps.enable is defined
+    - services.frps.enable is true 
+
+- include: frps_nginx.yml
+  when:
+    - services.frps.enable is defined
+    - services.frps.enable is true 
+    - services.frps.nginx.enable is defined
+    - services.frps.nginx.enable is true 
+
+- include: frpc.yml
+  when:
+    - services.frpc.enable is defined 
+    - services.frpc.enable is true +
\ No newline at end of file
diff --git a/roles/frp/templates/awall-rule.json.j2 b/roles/frp/templates/awall-rule.json.j2
@@ -0,0 +1,13 @@
+{
+  "description": "Allow FRPS on WAN",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": { "proto": "tcp", "port": {{ services.frps.port }} },
+      "action": "accept"
+    }
+  ]
+}+
\ No newline at end of file
diff --git a/roles/frp/templates/frpc.conf.j2 b/roles/frp/templates/frpc.conf.j2
@@ -0,0 +1,37 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+[common]
+server_addr = {{ services.frpc.serverAddress }}
+server_port = {{ services.frpc.serverPort }}
+token       = {{ services.frpc.token }}
+{% if services.frpc.dashboard.enable is defined and services.frpc.dashboard.enable is true %}
+# the frp client exposes its web UI via the admin_* keys (dashboard_* is frps-only)
+admin_addr = 127.0.0.1
+admin_port = {{ services.frpc.dashboard.port }}
+admin_user = {{ services.frpc.dashboard.user }}
+admin_pwd  = {{ services.frpc.dashboard.passwd }}
+{% endif %}
+
+{% for tunnel in services.frpc.tunnels %}
+[{{ tunnel.name }}]
+type = {{ tunnel.type }}
+{% if tunnel.local_ip is defined %}
+local_ip = {{ tunnel.local_ip }}
+{% endif %}
+{% if tunnel.local_port is defined %}
+local_port = {{ tunnel.local_port }}
+{% endif %}
+{% if tunnel.remote_port is defined %}
+remote_port = {{ tunnel.remote_port }}
+{% endif %}
+{% if tunnel.custom_domains is defined %}
+custom_domains = {{ tunnel.custom_domains }}
+{% endif %}
+{% if tunnel.subdomain is defined %}
+subdomain = {{ tunnel.subdomain }}
+{% endif %}
+{% if tunnel.locations is defined %}
+locations = {{ tunnel.locations }}
+{% endif %}
+
+{% endfor %}+
\ No newline at end of file
diff --git a/roles/frp/templates/frps.confd.j2 b/roles/frp/templates/frps.confd.j2
@@ -0,0 +1,6 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+# Configuration for /etc/init.d/frps
+frps_opts="--token={{ services.frps.token }} --bind_port={{ services.frps.port }} --subdomain_host={{ services.frps.vhostDomain }} --vhost_http_port={{ services.frps.vhostPort }}"
diff --git a/roles/frp/templates/frps.ini.j2 b/roles/frp/templates/frps.ini.j2
@@ -0,0 +1,10 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+[common]
+bind_port = {{ services.frps.port }}
+vhost_http_port = {{ services.frps.vhostPort }}
+authentication_method = token
+token = {{ services.frps.token }}
+subdomain_host = {{ services.frps.vhostDomain }}
diff --git a/roles/frp/templates/nginx-vhost.conf.j2 b/roles/frp/templates/nginx-vhost.conf.j2
@@ -0,0 +1,37 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% for vhost in services.frps.nginx.vhosts %}
+{% if services.frps.nginx.sslOnly is not defined or services.frps.nginx.sslOnly is false %}
+server {
+	listen 80;
+	listen [::]:80;
+
+	server_name {{ vhost }}.{{ services.frps.vhostDomain }};
+
+	location / {
+		{# was services.frps.vhost_port (undefined); the option is vhostPort everywhere else #}
+		proxy_pass http://127.0.0.1:{{ services.frps.vhostPort }}/;
+		include /etc/nginx/proxy.conf;
+	}
+}
+
+{% endif %}
+{# guard with "is defined": checks.yml only requires ssl.* when ssl.enable is set #}
+{% if services.frps.nginx.ssl is defined and services.frps.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.frps.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.frps.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+
+	server_name {{ vhost }}.{{ services.frps.vhostDomain }};
+
+	location / {
+		proxy_pass http://127.0.0.1:{{ services.frps.vhostPort }}/;
+		include /etc/nginx/proxy.conf;
+	}
+}
+{% endif %}
+{% endfor %}
diff --git a/alpine/config-files/gitolite/gitolite.rc.patch b/roles/gitolite/files/gitolite.rc.patch
diff --git a/roles/gitolite/tasks/main.yml b/roles/gitolite/tasks/main.yml
@@ -0,0 +1,106 @@
+---
+
+# fail when needed options not set
+
+- fail: msg="Option 'services.gitolite.initialKey' has to be defined!"
+  when:
+    - services.gitolite.enable is true
+    - services.gitolite.initialKey is not defined
+
+- fail: msg="Option 'system.enableSSH' has to be true! Gitolite needs SSH!"
+  when:
+    - services.gitolite.enable is true
+    - system.enableSSH is not defined or system.enableSSH is false
+
+
+#install it
+
+- name: "[Alpine] Install package: gitolite"
+  apk:
+    name: gitolite, git
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.gitolite.enable is true
+
+- name: "[Archlinux] Install package: gitolite"
+  # pacman, not apk — the apk module cannot manage packages on Archlinux
+  pacman:
+    name: gitolite, git
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.gitolite.enable is true
+
+
+# configure it
+
+- name: copy initial ssh-key to destination host
+  copy:
+    content: "{{ services.gitolite.initialKey }}"
+    dest: /var/lib/git/first-user-key.pub
+    owner: git
+    group: git
+  when:
+    - services.gitolite.enable is true
+    - services.gitolite.initialKey is defined
+
+- name: Initial setup of gitolite
+  become: yes
+  become_user: git
+  command:
+    cmd: gitolite setup -pk /var/lib/git/first-user-key.pub
+    creates: /var/lib/git/.gitolite
+  when:
+    - services.gitolite.enable is true
+
+- name: Delete first-user-key.pub
+  file:
+    path: /var/lib/git/first-user-key.pub
+    state: absent
+  when:
+    - services.gitolite.enable is true
+
+- name: Unlock the git user
+  ignore_errors: yes
+  command:
+    cmd: passwd -u git
+  when:
+    - services.gitolite.enable is true
+
+- name: fix gitolite.rc to set correct permissons
+  patch:
+    src: gitolite.rc.patch
+    dest: /var/lib/git/.gitolite.rc
+  when:
+    - services.gitolite.enable is true
+
+- name: set permissions for git dir
+  file:
+    path: /var/lib/git
+    state: directory  
+    mode: 0755
+    owner: git
+    group: git
+  when:
+    - services.gitolite.enable is true
+
+
+#remove it
+
+- name: "[Alpine] Remove package: gitolite"
+  apk:
+    name: gitolite
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.gitolite.enable is false
+
+- name: "[Archlinux] Remove package: gitolite"
+  # pacman, not apk — the apk module cannot manage packages on Archlinux
+  pacman:
+    name: gitolite
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.gitolite.enable is false
\ No newline at end of file
diff --git a/roles/grafana/tasks/checks.yml b/roles/grafana/tasks/checks.yml
@@ -0,0 +1,34 @@
+---
+
+- fail: msg="Option 'services.grafana.configFile' has to be set!"
+  when:
+    - services.grafana.configFile is not defined
+
+
+- fail: msg="Option 'services.grafana.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.grafana.nginx.enable is defined
+    - services.grafana.nginx.enable is true
+    - services.grafana.nginx.domain is not defined
+
+- fail: msg="Option 'services.grafana.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.grafana.nginx.enable is defined
+    - services.grafana.nginx.enable is true
+    - services.grafana.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.grafana.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.grafana.nginx.enable is defined
+    - services.grafana.nginx.enable is true
+    - services.grafana.nginx.ssl.enable is defined
+    - services.grafana.nginx.ssl.enable is true
+    - services.grafana.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.grafana.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.grafana.nginx.enable is defined
+    - services.grafana.nginx.enable is true
+    - services.grafana.nginx.ssl.enable is defined
+    - services.grafana.nginx.ssl.enable is true
+    - services.grafana.nginx.ssl.privkey is not defined
diff --git a/roles/grafana/tasks/configure.yml b/roles/grafana/tasks/configure.yml
@@ -0,0 +1,22 @@
+---
+
+- name: "Copy config-file to: /etc/grafana.ini"
+  copy: 
+    src: "{{ services.grafana.configFile }}"
+    dest: /etc/grafana.ini
+    mode: 0755
+
+- name: "Remove directory: /var/lib/grafana/provisioning"
+  file:
+    path: /var/lib/grafana/provisioning
+    state: absent
+
+- name: "Copy provisioning data to: /var/lib/grafana/provisioning"
+  copy: 
+    src: "{{ services.grafana.provisioningPath }}"
+    dest: /var/lib/grafana/
+    mode: 0755
+    owner: grafana
+    group: grafana
+  when:
+    - services.grafana.provisioningPath is defined+
\ No newline at end of file
diff --git a/roles/grafana/tasks/install.yml b/roles/grafana/tasks/install.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[Alpine] Install package: grafana"
+  apk:
+    name: grafana
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: grafana"
+  pacman:
+    name: grafana
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
diff --git a/roles/grafana/tasks/main.yml b/roles/grafana/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- include: checks.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is true
+
+- include: install.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is true
+
+- include: configure.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is true
+
+- include: start.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is true
+
+- include: nginx.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is true
+    - services.grafana.nginx is defined
+    - services.grafana.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.grafana.enable is defined
+    - services.grafana.enable is false
+
+
+
+
+
diff --git a/roles/grafana/tasks/nginx.yml b/roles/grafana/tasks/nginx.yml
@@ -0,0 +1,23 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template:
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/grafana.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/grafana/tasks/remove.yml b/roles/grafana/tasks/remove.yml
@@ -0,0 +1,40 @@
+---
+
+- name: "[OpenRC] Disable and stop service: grafana"
+  service:
+    name: grafana
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: grafana"
+  systemd:
+    name: grafana
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+
+- name: "[Alpine] Remove package: grafana"
+  apk:
+    name: grafana
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Remove package: grafana"
+  pacman:
+    name: grafana
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+
+- name: "Delete leftovers"
+  file:
+    path: "{{item}}"
+    state: absent
+  with_items:
+    - /etc/nginx/conf.d/grafana.conf
+    - /etc/grafana.ini
+    - /var/lib/grafana/provisioning+
\ No newline at end of file
diff --git a/roles/grafana/tasks/start.yml b/roles/grafana/tasks/start.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[OpenRC] Restart and enable service: grafana"
+  service:
+    name: grafana
+    enabled: yes
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart and enable service: grafana"
+  systemd:
+    name: grafana
+    enabled: yes
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"+
\ No newline at end of file
diff --git a/roles/grafana/templates/nginx-vhost.conf.j2 b/roles/grafana/templates/nginx-vhost.conf.j2
@@ -0,0 +1,33 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.grafana.nginx.sslOnly is not defined or services.grafana.nginx.sslOnly is false %}
+server {
+	listen 80;
+	listen [::]:80;
+
+	server_name {{ services.grafana.nginx.domain }};
+
+	location / {
+		proxy_pass http://localhost:3000/;
+	}
+}
+
+{% endif %}
+{# guard with "is defined": checks.yml only requires ssl.* when ssl.enable is set #}
+{% if services.grafana.nginx.ssl is defined and services.grafana.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.grafana.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.grafana.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+
+	server_name {{ services.grafana.nginx.domain }};
+
+	location / {
+		proxy_pass http://localhost:3000/;
+	}
+}
+{% endif %}
diff --git a/roles/hostapd/tasks/main.yml b/roles/hostapd/tasks/main.yml
@@ -0,0 +1,119 @@
+---
+
+# install it 
+- name: "[Alpine] Install package: hostapd" 
+  apk:
+    name: hostapd 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.hostapd.enable is true
+
+- name: "[Archlinux] Install package: hostapd" 
+  pacman:
+    name: hostapd 
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.hostapd.enable is true
+
+
+# configure it 
+
+- name: "Create file: /etc/hostapd/hostapd.conf"
+  template:
+    src: hostapd.conf.j2
+    dest: /etc/hostapd/hostapd.conf
+  register: createConfig
+  when: 
+    - services.hostapd.enable is true
+
+
+#todo: fix systemd service to wait for device
+
+# (re)start it
+
+- name: "[OpenRC] Enable and start service: hostapd"
+  service:
+    name: hostapd
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.hostapd.enable is true
+
+- name: "[systemd] Enable and start service: hostapd"
+  systemd:
+    name: hostapd
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.hostapd.enable is true
+
+- name: "[OpenRC] Restart service: hostapd"
+  service:
+    name: hostapd
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.hostapd.enable is true
+    - createConfig.changed
+
+- name: "[systemd] Restart service: hostapd"
+  systemd:
+    name: hostapd
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.hostapd.enable is true
+    - createConfig.changed
+
+
+# stop it 
+
+- name: "[OpenRC] Disable and stop service: hostapd"
+  service:
+    name: hostapd
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.hostapd.enable is false
+
+- name: "[systemd] Disable and stop service: hostapd"
+  systemd:
+    name: hostapd
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "systemd"
+    - services.hostapd.enable is false
+
+
+# remove it
+
+- name: "[Alpine] Remove package: hostapd" 
+  apk:
+    name: hostapd 
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.hostapd.enable is false
+
+- name: "[Archlinux] Remove package: hostapd" 
+  pacman:
+    name: hostapd 
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.hostapd.enable is false
+
+- name: "Remove directory: /etc/hostapd"
+  file:
+    path: /etc/hostapd
+    state: absent
+  when: 
+    - services.hostapd.enable is false
diff --git a/roles/hostapd/templates/hostapd.conf.j2 b/roles/hostapd/templates/hostapd.conf.j2
@@ -0,0 +1,27 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+interface={{ services.hostapd.interface }}
+bridge={{ services.hostapd.bridge }}
+
+ssid={{ services.hostapd.ssid }}
+driver=nl80211
+country_code=DE
+
+hw_mode=g
+channel={{ services.hostapd.channel }}
+
+wpa=2
+auth_algs=1
+
+rsn_pairwise=CCMP
+wpa_key_mgmt=WPA-PSK
+wpa_passphrase={{ services.hostapd.passphrase }}
+
+logger_stdout=-1
+logger_stdout_level=2
+
+ieee80211n=1
+wmm_enabled=1
+ht_capab=[HT40+]
diff --git a/roles/kawaidesu.ansible_networkd/CHANGELOG.md b/roles/kawaidesu.ansible_networkd/CHANGELOG.md
@@ -0,0 +1,13 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.1.1] - 2020-06-04
+### Fixed
+- Most ansible-lint warnings from galaxy.ansible.com
+    
+## [0.1.0] - 2020-06-04
+### Added
+- Inital release
diff --git a/roles/kawaidesu.ansible_networkd/LICENSE b/roles/kawaidesu.ansible_networkd/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Oleg "Zmey!" Vasiliev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/roles/kawaidesu.ansible_networkd/README.md b/roles/kawaidesu.ansible_networkd/README.md
@@ -0,0 +1,74 @@
+networkd 
+=========
+
+An Ansible role for configuring systemd-networkd.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+Requirements
+------------
+
+This role assumes that networkd is already present in the system. So it should be suitable for any distro with networkd.
+
+Role Variables
+--------------
+
+Example configuration. Follow networkd documentation to construct yours.
+```yaml
+networkd:
+ link:
+     # This is file name
+   - name: eth0
+     # This is prefix for file. This results in following file name: 50-eth0.link
+     priority: 50
+     content:
+       - Match:
+         - MACAddress: "aa:bb:cc:dd:ee:ff"
+       - Link:
+         - Name: eth0
+ netdev:
+   - name: br0
+     priority: 50
+     content:
+       - NetDev:
+         - Name: br0
+         - Kind: bridge
+ network:
+   - name: eth0
+     priority: 50
+     content:
+       - Match:
+         - Name: eth0
+       - Network:
+         - DHCP: ipv4
+         - LinkLocalAddressing: no
+         - LLDP: yes
+       - DHCPv4:
+         - UseHostname: no
+         - Hostname: gimme-some-addr
+         - UseMTU: yes
+   - name: br0_slaves
+     priority: 50
+     content:
+       - Match:
+         - MACAddress: "11:bb:cc:dd:ee:ff 22:bb:cc:dd:ee:ff"
+       - Network:
+         - Bridge: br0
+```
+
+What to do on configuration changes. Could be "restart", "reload" or "nothing". Variable is mandatory.
+```yaml
+networkd_apply_action: "restart"
+```
+
+Custom content for `/etc/resolv.conf`. Every element in list is string in file. Variable is optional.
+```yaml
+networkd_resolv_conf_content:
+  - nameserver 1.1.1.1
+  - nameserver 8.8.8.8
+```
+
+License
+-------
+
+MIT
diff --git a/roles/kawaidesu.ansible_networkd/defaults/main.yml b/roles/kawaidesu.ansible_networkd/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+networkd:
+
+# Could be "restart", "reload" or "nothing"
+networkd_apply_action: "restart"
diff --git a/roles/kawaidesu.ansible_networkd/handlers/main.yml b/roles/kawaidesu.ansible_networkd/handlers/main.yml
@@ -0,0 +1,17 @@
+---
+
+- name: networkd | Do restart
+  systemd:
+    name: systemd-networkd
+    state: restarted
+
+# From man:
+# Reload .netdev and .network files. If a new .netdev file is found, then the corresponding netdev is created.
+# Note that even if an existing .netdev is modified or removed, systemd-networkd does not update or remove the netdev.
+# If a new, modified or removed .network file is found, then all interfaces which match the file are reconfigured.
+- name: networkd | Do reload
+  shell: networkctl reload
+
+- name: networkd | Do nothing
+  debug:
+    msg: "Not applying new configuration due to selected action."
diff --git a/roles/kawaidesu.ansible_networkd/meta/.galaxy_install_info b/roles/kawaidesu.ansible_networkd/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: Wed Dec  9 09:17:04 2020
+version: 0.1.1
diff --git a/roles/kawaidesu.ansible_networkd/meta/main.yml b/roles/kawaidesu.ansible_networkd/meta/main.yml
@@ -0,0 +1,30 @@
+---
+galaxy_info:
+  author: 'Oleg "Zmey!" Vasiliev'
+  description: An Ansible role for configuring systemd-networkd
+
+  license: MIT
+
+  min_ansible_version: 2.4
+
+  # Totally not a full list...
+  platforms:
+    - name: Archlinux
+      versions: all
+    - name: Ubuntu
+      versions:
+        - xenial
+        # "boinic" was a typo for the 18.04 codename
+        - bionic
+        - focal
+    - name: Debian
+      versions:
+        - jessie
+        - stretch
+        - buster
+
+  galaxy_tags:
+    - networking
+    - networkd
+    - systemd
+
+dependencies: []
diff --git a/roles/kawaidesu.ansible_networkd/tasks/deploy_configs.yml b/roles/kawaidesu.ansible_networkd/tasks/deploy_configs.yml
@@ -0,0 +1,34 @@
+---
+
+- name: networkd | Deploy .link configs
+  template:
+    src: networkd.j2
+    dest: "/etc/systemd/network/{{ item.priority }}-{{ item.name }}.link"
+    mode: 0644
+    owner: root
+    group: root
+  with_items: "{{ networkd.link | default([]) }}"
+  register: networkd_deployed_link
+  notify: networkd | Do {{ networkd_apply_action }}
+
+- name: networkd | Deploy .netdev configs
+  template:
+    src: networkd.j2
+    dest: "/etc/systemd/network/{{ item.priority }}-{{ item.name }}.netdev"
+    mode: 0644
+    owner: root
+    group: root
+  with_items: "{{ networkd.netdev | default([]) }}"
+  register: networkd_deployed_netdev
+  notify: networkd | Do {{ networkd_apply_action }}
+
+- name: networkd | Deploy .network configs
+  template:
+    src: networkd.j2
+    dest: "/etc/systemd/network/{{ item.priority }}-{{ item.name }}.network"
+    mode: 0644
+    owner: root
+    group: root
+  with_items: "{{ networkd.network | default([]) }}"
+  register: networkd_deployed_network
+  notify: networkd | Do {{ networkd_apply_action }}
diff --git a/roles/kawaidesu.ansible_networkd/tasks/main.yml b/roles/kawaidesu.ansible_networkd/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Check role vars
+  assert:
+    that:
+      - networkd_apply_action == "restart" or networkd_apply_action == "reload" or networkd_apply_action == "nothing"
+    fail_msg: 'networkd_apply_action should be "restart", "reload" or "nothing"'
+    quiet: yes
+
+- import_tasks: deploy_configs.yml
+
+- import_tasks: remove_unmanaged.yml
+
+- name: networkd | Deploy resolv.conf
+  template:
+    src: resolv.conf.j2
+    dest: /etc/resolv.conf
+    mode: 0644
+    owner: root
+    group: root
+  when: networkd_resolv_conf_content is defined
+
+- name: networkd | Enable and start service
+  systemd:
+    name: systemd-networkd
+    state: started
+    enabled: yes
+
+- name: networkd | Run handlers now
+  meta: flush_handlers
diff --git a/roles/kawaidesu.ansible_networkd/tasks/remove_unmanaged.yml b/roles/kawaidesu.ansible_networkd/tasks/remove_unmanaged.yml
@@ -0,0 +1,20 @@
+---
+- name: networkd | Collect file list in network dir
+  find:
+    path: /etc/systemd/network
+    hidden: yes
+  register: networkd_found_files
+  check_mode: no
+  changed_when: false
+
+- name: networkd | Remove unmanaged files
+  file:
+    path: "/etc/systemd/network/{{ item.path | basename }}"
+    state: absent
+  with_items:
+   - "{{ networkd_found_files.files }}"
+  when:
+    - (item.path) not in ( networkd_deployed_link | json_query('results[].invocation.module_args.dest') | default([]) )
+    - (item.path) not in ( networkd_deployed_netdev | json_query('results[].invocation.module_args.dest') | default([]) )
+    - (item.path) not in ( networkd_deployed_network | json_query('results[].invocation.module_args.dest') | default([]) )
+  notify: networkd | Do {{ networkd_apply_action }}
diff --git a/roles/kawaidesu.ansible_networkd/templates/networkd.j2 b/roles/kawaidesu.ansible_networkd/templates/networkd.j2
@@ -0,0 +1,12 @@
+# {{ ansible_managed }}
+{% for section in item.content %}
+{% for section_name, section_params in section.items() %}
+
+[{{ section_name }}]
+{% for item in section_params %}
+{% for key, value in item.items() %}
+{{ key }}={{ value }}
+{% endfor %}
+{% endfor %}
+{% endfor %}
+{% endfor %}
diff --git a/roles/kawaidesu.ansible_networkd/templates/resolv.conf.j2 b/roles/kawaidesu.ansible_networkd/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+# {{ ansible_managed }}
+{% for item in networkd_resolv_conf_content %}
+{{ item }}
+{% endfor %}
diff --git a/roles/maddy/files/awall-rule.json b/roles/maddy/files/awall-rule.json
@@ -0,0 +1,37 @@
+{
+  "description": "Allow mail specific ports on WAN (for maddy)",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "smtp",
+      "action": "accept"
+    },
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": { "proto": "tcp", "port": 465 },
+      "action": "accept"
+    },
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "submission",
+      "action": "accept"
+    },
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "imap",
+      "action": "accept"
+    },
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "imaps",
+      "action": "accept"
+    }
+  ]
+}
diff --git a/alpine/config-files/maddy/maddy-service.patch b/roles/maddy/files/maddy-service.patch
diff --git a/roles/maddy/tasks/checks.yml b/roles/maddy/tasks/checks.yml
@@ -0,0 +1,19 @@
+---
+
+- fail: msg="This Role currently only works on AlpineLinux!"
+  when:
+    - ansible_distribution != "Alpine" 
+
+- fail: msg="Option 'services.maddy.hostname' has to be set!"
+  when:
+    - services.maddy.hostname is not defined
+
+- fail: msg="Option 'services.maddy.ssl_cert' has to be set!"
+  when:
+    - services.maddy.ssl_cert is not defined
+
+- fail: msg="Option 'services.maddy.ssl_privkey' has to be set!"
+  when:
+    - services.maddy.ssl_privkey is not defined
+
+#todo: check that ssl files exist+
\ No newline at end of file
diff --git a/roles/maddy/tasks/configure.yml b/roles/maddy/tasks/configure.yml
@@ -0,0 +1,32 @@
+---
+
+- name: Add user 'maddy' to group 'acme-redirect'
+  user:
+    name: maddy
+    groups: acme-redirect
+    append: yes
+  when: 
+    - services.acme_redirect.enable is true
+
+- name: "Generate file: /etc/maddy/maddy.conf"
+  template: 
+    src: maddy.conf.j2
+    dest: /etc/maddy/maddy.conf
+    mode: 0644
+    owner: maddy
+    group: maddy
+
+- name: "Create file: /var/log/maddy.log"
+  file:
+    path:  /var/log/maddy.log
+    state: touch
+    owner: maddy
+    group: maddy
+    mode:  0644
+
+- name: "Patch file: /etc/init.d/maddy"
+  patch:
+    src: maddy-service.patch
+    dest: /etc/init.d/maddy
+  when:
+    - ansible_service_mgr == "openrc"
diff --git a/roles/maddy/tasks/firewall.yml b/roles/maddy/tasks/firewall.yml
@@ -0,0 +1,19 @@
+---
+
+- name: "[awall] Create rule for: maddy"
+  copy:
+    src: awall-rule.json
+    dest: /etc/awall/optional/maddy.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[awall] Enable rule for: maddy"
+  awall:
+    name: maddy
+    state: enabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
diff --git a/roles/maddy/tasks/install.yml b/roles/maddy/tasks/install.yml
@@ -0,0 +1,9 @@
+---
+
+- name: "Install package: maddy"
+  apk:
+    name: maddy
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
diff --git a/roles/maddy/tasks/main.yml b/roles/maddy/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+
+- include: checks.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is true
+
+- include: install.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is true
+
+- include: configure.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is true
+
+- include: firewall.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is true
+
+- include: start.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is true
+
+- include: remove.yml
+  when:
+    - services.maddy.enable is defined
+    - services.maddy.enable is false
+
diff --git a/roles/maddy/tasks/remove.yml b/roles/maddy/tasks/remove.yml
@@ -0,0 +1,45 @@
+---
+
+- name: "[awall] Disable rule for: maddy"
+  awall:
+    name: maddy
+    state: disabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[awall] Delete rule for: maddy"
+  file:
+    path: /etc/awall/optional/maddy.json
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+
+- name: "[OpenRC] Disable and stop service: maddy"
+  service:
+    name: "maddy"
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+
+
+- name: "Remove package: maddy"
+  apk:
+    name: maddy
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+
+
+- name: "Delete leftovers"
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/etc/init.d/maddy"
+    - "/etc/maddy"
+    - "/var/log/maddy.log"
diff --git a/roles/maddy/tasks/start.yml b/roles/maddy/tasks/start.yml
@@ -0,0 +1,9 @@
+---
+
+- name: "[OpenRC] Enable and restart service: maddy"
+  service:
+    name: maddy
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
diff --git a/roles/maddy/templates/maddy.conf.j2 b/roles/maddy/templates/maddy.conf.j2
@@ -0,0 +1,189 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+
+## maddy 0.4 - default configuration file
+
+log syslog /var/log/maddy.log
+
+# ----------------------------------------------------------------------------
+# Base variables
+$(hostname) = {{services.maddy.hostname}}
+$(primary_domain) = ctu.cx
+$(local_domains) = $(hostname) $(primary_domain) antifa.jetzt thein.ovh ctucx.de
+
+tls file {{services.maddy.ssl_cert}} {{services.maddy.ssl_privkey}}
+
+# ----------------------------------------------------------------------------
+# Local storage & authentication
+
+# pass_table provides local hashed passwords storage for authentication of
+# users. It can be configured to use any "table" module, in default
+# configuration a table in SQLite DB is used.
+# Table can be replaced to use e.g. a file for passwords. Or pass_table module
+# can be replaced altogether to use some external source of credentials (e.g.
+# PAM, /etc/shadow file).
+#
+# If table module supports it (sql_table does) - credentials can be managed
+# using 'maddyctl creds' command.
+
+auth.pass_table local_authdb {
+    table sql_table {
+        driver sqlite3
+        dsn credentials.db
+        table_name passwords
+    }
+}
+
+# imapsql module stores all indexes and metadata necessary for IMAP using a
+# relational database. It is used by IMAP endpoint for mailbox access and
+# also by SMTP & Submission endpoints for delivery of local messages.
+#
+# IMAP accounts, mailboxes and all message metadata can be inspected using
+# imap-* subcommands of maddyctl utility.
+
+storage.imapsql local_mailboxes {
+    driver sqlite3
+    dsn imapsql.db
+}
+
+# ----------------------------------------------------------------------------
+# SMTP endpoints + message routing
+
+hostname $(hostname)
+
+msgpipeline local_routing {
+    dmarc yes
+    check {
+        require_mx_record
+        dkim
+        spf
+    }
+
+    # Insert handling for special-purpose local domains here.
+    # e.g.
+    # destination lists.example.org {
+    #     deliver_to lmtp tcp://127.0.0.1:8024
+    # }
+
+    destination postmaster $(local_domains) {
+        modify {
+            replace_rcpt static {
+               entry postmaster           postmaster@$(primary_domain)
+               entry leon@thein.ovh       leah@ctu.cx
+               entry leah@thein.ovh       leah@ctu.cx
+               entry leah@antifa.jetzt    leah@ctu.cx
+            }
+
+            # Implement plus-address notation.
+            replace_rcpt regexp "(.+)\+(.+)@(.+)" "$1@$3"
+
+            replace_rcpt regexp "(.+)@ctucx.de" "leah@ctu.cx"
+            replace_rcpt regexp "(.+)@ctu.cx"   "leah@ctu.cx"
+        }
+
+        deliver_to &local_mailboxes
+    }
+
+    default_destination {
+        reject 550 5.1.1 "User doesn't exist"
+    }
+}
+
+smtp tcp://0.0.0.0:25 {
+    limits {
+        # Up to 20 msgs/sec across max. 10 SMTP connections.
+        all rate 20 1s
+        all concurrency 10
+    }
+
+    source $(local_domains) {
+        reject 501 5.1.8 "Use Submission for outgoing SMTP"
+    }
+
+    default_source {
+        destination postmaster $(local_domains) {
+            deliver_to &local_routing
+        }
+
+        default_destination {
+            reject 550 5.1.1 "User doesn't exist"
+        }
+    }
+}
+
+submission tls://0.0.0.0:465 tcp://0.0.0.0:587 {
+    limits {
+        # Up to 50 msgs/sec across any amount of SMTP connections.
+        all rate 50 1s
+    }
+
+    auth &local_authdb
+
+    source $(local_domains) {
+        destination postmaster $(local_domains) {
+            deliver_to &local_routing
+        }
+
+        default_destination {
+            modify {
+                dkim $(primary_domain) $(local_domains) default {
+                	newkey_algo ed25519
+                }
+            }
+            deliver_to &remote_queue
+        }
+    }
+
+    default_source {
+        reject 501 5.1.8 "Non-local sender domain"
+    }
+}
+
+target.remote outbound_delivery {
+    limits {
+        # Up to 20 msgs/sec across max. 10 SMTP connections
+        # for each recipient domain.
+        destination rate 20 1s
+        destination concurrency 10
+    }
+
+    mx_auth {
+        dane
+
+        mtasts {
+            cache fs
+            fs_dir mtasts_cache/
+        }
+
+        local_policy {
+            min_tls_level encrypted
+            min_mx_level none
+        }
+    }
+}
+
+target.queue remote_queue {
+    target &outbound_delivery
+
+    autogenerated_msg_domain $(primary_domain)
+
+    bounce {
+        destination postmaster $(local_domains) {
+            deliver_to &local_routing
+        }
+
+        default_destination {
+            reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
+        }
+    }
+}
+
+# ----------------------------------------------------------------------------
+# IMAP endpoints
+
+imap tls://0.0.0.0:993 tcp://0.0.0.0:143 {
+    auth &local_authdb
+    storage &local_mailboxes
+}
diff --git a/roles/nginx/files/awall-rule.json b/roles/nginx/files/awall-rule.json
@@ -0,0 +1,19 @@
+{
+  "description": "Allow HTTP(S) on WAN (for nginx)",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "http",
+      "action": "accept"
+    },
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "https",
+      "action": "accept"
+    }
+  ]
+}
diff --git a/roles/nginx/files/awall-rule_httpsOnly.json b/roles/nginx/files/awall-rule_httpsOnly.json
@@ -0,0 +1,13 @@
+{
+  "description": "Allow HTTPS on WAN (for nginx)",
+  "import": [ "base" ],
+
+  "filter": [
+    {
+      "in": "WAN",
+      "out": "_fw",
+      "service": "https",
+      "action": "accept"
+    }
+  ]
+}
diff --git a/roles/nginx/files/nginx.conf b/roles/nginx/files/nginx.conf
@@ -0,0 +1,59 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+user nginx;
+
+worker_processes auto;
+
+pcre_jit on;
+
+error_log /var/log/nginx/error.log warn;
+
+include /etc/nginx/modules/*.conf;
+
+
+events {
+	worker_connections 1024;
+}
+
+http {
+	include /etc/nginx/mime.types;
+	default_type application/octet-stream;
+
+	server_tokens off;
+
+	server_names_hash_bucket_size 64;
+
+	types_hash_max_size 1024;
+	types_hash_bucket_size 128;
+
+	client_max_body_size 1m;
+
+	keepalive_timeout 65;
+
+	sendfile on;
+
+	tcp_nodelay on;
+
+	ssl_prefer_server_ciphers on;
+
+	ssl_session_cache shared:SSL:2m;
+
+	gzip on;
+
+	gzip_vary on;
+
+	#gzip_static on;
+
+
+	# Specifies the main log format.
+	log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+			'$status $body_bytes_sent "$http_referer" '
+			'"$http_user_agent" "$http_x_forwarded_for"';
+
+	access_log /var/log/nginx/access.log main;
+
+	include /etc/nginx/conf.d/*.conf;
+	include /etc/nginx/vhosts/*.conf;
+}
diff --git a/roles/nginx/files/proxy_settings.conf b/roles/nginx/files/proxy_settings.conf
@@ -0,0 +1,20 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+#make websockets possible
+proxy_http_version      1.1;
+proxy_set_header        Upgrade $http_upgrade;
+proxy_set_header        Connection "upgrade";
+
+#some headers needed for some software to work
+proxy_redirect          off;
+proxy_connect_timeout   90;
+proxy_send_timeout      90;
+proxy_read_timeout      90;
+proxy_set_header        Host $host;
+proxy_set_header        X-Real-IP $remote_addr;
+proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header        X-Forwarded-Proto $scheme;
+proxy_set_header        X-Forwarded-Host $host;
+proxy_set_header        X-Forwarded-Server $host;
diff --git a/roles/nginx/files/ssl_settings.conf b/roles/nginx/files/ssl_settings.conf
@@ -0,0 +1,25 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+ssl_session_timeout 1d;
+ssl_session_cache shared:MozSSL:10m;  # about 40000 sessions
+ssl_session_tickets off;
+
+# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
+ssl_dhparam /etc/nginx/dhparam;
+
+# intermediate configuration
+ssl_protocols TLSv1.2 TLSv1.3;
+ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
+ssl_prefer_server_ciphers off;
+
+# HSTS (ngx_http_headers_module is required) (63072000 seconds)
+add_header Strict-Transport-Security "max-age=63072000" always;
+
+# OCSP stapling
+ssl_stapling on;
+ssl_stapling_verify on;
+
+# replace with the IP address of your resolver
+resolver 127.0.0.1;
diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml
@@ -0,0 +1,242 @@
+---
+
+#install it
+
+- name: "[Alpine] Install package: nginx" 
+  apk:
+    name: nginx
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.nginx.enable is true
+
+- name: "[Archlinux] Install package: nginx" 
+  pacman:
+    name: nginx
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.nginx.enable is true
+
+
+# configure it
+
+- name: adding user nginx to group acme-redirect
+  user:
+    name: nginx
+    groups: acme-redirect
+    append: yes
+  when: 
+    - services.nginx.enable is true
+    - services.acme_redirect.enable is true
+
+- name: "Create directory: /etc/nginx/passwd"
+  file:
+    path:  /etc/nginx/passwd
+    state: directory
+    owner: nginx
+    group: nginx
+    mode:  0700
+  when: 
+    - services.nginx.enable is true
+
+- name: Download dh-params from mozilla to /etc/nginx/dhparam
+  get_url:
+    url: https://ssl-config.mozilla.org/ffdhe2048.txt
+    dest: /etc/nginx/dhparam
+    owner: nginx
+    group: nginx    
+  when: 
+    - services.nginx.enable is true
+
+- name: "Create file: /etc/nginx/nginx.conf"
+  copy:
+    src: nginx.conf
+    dest: /etc/nginx/nginx.conf
+    owner: nginx
+    group: nginx
+  when: 
+    - services.nginx.enable is true
+
+- name: "Create file: /etc/nginx/ssl.conf"
+  copy:
+    src: ssl_settings.conf
+    dest: /etc/nginx/ssl.conf
+    owner: nginx
+    group: nginx
+  when: 
+    - services.nginx.enable is true
+
+- name: "Create file: /etc/nginx/proxy.conf"
+  copy:
+    src: proxy_settings.conf
+    dest: /etc/nginx/proxy.conf
+    owner: nginx
+    group: nginx
+    mode: 0755
+  when: 
+    - services.nginx.enable is true
+
+- name: "Create directory: /etc/nginx/conf.d"
+  file:
+    path: /etc/nginx/conf.d
+    state: directory
+    owner: nginx
+    group: nginx
+  when: 
+    - services.nginx.enable is true
+
+- name: "Recreate directory: /etc/nginx/vhost"
+  file:
+    path: /etc/nginx/vhosts
+    state: "{{ item }}"
+    owner: nginx
+    group: nginx
+  with_items:
+    - absent
+    - directory
+  when: 
+    - services.nginx.enable is true
+
+- name: Generate nginx vhosts
+  template:
+    src: vhost.conf.j2
+    dest: /etc/nginx/vhosts/{{item.key}}.conf
+    owner: nginx
+    group: nginx
+    mode: 0644
+  loop: "{{ lookup('dict', services.nginx.vhosts, wantlist=True) }}"
+  when: 
+    - services.nginx.enable is true
+    - services.nginx.vhosts is defined
+
+
+# firewall it
+
+- name: "[awall] Create rule for: nginx (http,https)"
+  copy:
+    src: awall-rule.json
+    dest: /etc/awall/optional/nginx.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.nginx.enable is true
+    - services.nginx.sslOnly is not defined or services.nginx.sslOnly is false
+
+- name: "[awall] Create rule for: nginx (https)"
+  copy:
+    src: awall-rule_httpsOnly.json
+    dest: /etc/awall/optional/nginx.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.nginx.enable is true
+    - services.nginx.sslOnly is defined
+    - services.nginx.sslOnly is true
+
+- name: "[awall] Enable rule for: nginx"
+  awall:
+    name: nginx
+    state: enabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.nginx.enable is true
+
+
+# (re)start it
+
+- name: "[OpenRC] Enable and restart service: nginx"
+  service:
+    name: nginx
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.nginx.enable is true
+
+- name: "[systemd] Enable and restart service: nginx"
+  systemd:
+    name: nginx
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.nginx.enable is true
+
+
+# stop it 
+
+- name: "[OpenRC] Disable and stop service: nginx"
+  service:
+    name: nginx
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.nginx.enable is false
+
+- name: "[systemd] Disable and stop service: nginx"
+  systemd:
+    name: nginx
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.nginx.enable is false
+
+
+#defirewall it
+
+- name: "[awall] Disable rule for: nginx"
+  awall:
+    name: nginx
+    state: disabled
+    activate: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.nginx.enable is false
+
+- name: "[awall] Delete rule for: nginx"
+  file:
+    path: /etc/awall/optional/nginx.json
+    state: absent 
+  when:
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+    - services.nginx.enable is false
+
+
+# remove it 
+
+- name: "[Alpine] Remove package: nginx" 
+  apk:
+    name: nginx
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.nginx.enable is false
+
+- name: "[Archlinux] Remove package: nginx" 
+  pacman:
+    name: nginx
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.nginx.enable is false
+
+
+# remove leftover files
+
+- name: "Remove directory: /etc/nginx"
+  file:
+    path: /etc/nginx
+    state: absent
+  when: 
+    - services.nginx.enable is false
diff --git a/roles/nginx/templates/vhost.conf.j2 b/roles/nginx/templates/vhost.conf.j2
@@ -0,0 +1,83 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.nginx.sslOnly is not defined or services.nginx.sslOnly is false %}
+server {
+	{% if item.value.defaultServer is defined and item.value.defaultServer is true%}
+	listen 80 default_server;
+	listen [::]:80 default_server;
+	{% else %}
+	listen 80;
+	listen [::]:80;
+	{% endif %}
+
+	server_name {{item.key}};
+
+
+	{% if item.value.root is defined %}
+	root {{ item.value.root }};
+	{% endif %}	
+
+	{% for location in item.value.locations %}
+	location {{ location.path }} {
+		{% if location.proxy is defined %}
+		proxy_pass {{ location.proxy }};
+		include /etc/nginx/proxy.conf;
+		{% endif %}
+		{% if location.root is defined %}
+		root {{ location.root }};
+		{% endif %}		
+		{% if location.extraConfig is defined %}
+		{{ location.extraConfig }}
+		{% endif %}
+	}
+	{% endfor %}
+
+	{% if item.value.extraConfig is defined %}
+	{{ item.value.extraConfig }}
+	{% endif %}
+}
+
+{% endif %}
+{% if item.value.ssl.enable is defined and item.value.ssl.enable is true %}
+server {
+	{% if item.value.defaultServer is defined and item.value.defaultServer is true%}
+	listen 443 ssl default_server;
+	listen [::]:443 ssl default_server;
+	{% else %}
+	listen 443 ssl;
+	listen [::]:443 ssl;
+	{% endif %}
+
+	server_name {{item.key}};
+
+	ssl_certificate     "{{ item.value.ssl.cert }}";
+	ssl_certificate_key "{{ item.value.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+
+
+	{% if item.value.root is defined %}
+	root {{ item.value.root }};
+	{% endif %}	
+
+	{% for location in item.value.locations %}
+	location {{ location.path }} {
+		{% if location.proxy is defined %}
+		proxy_pass {{ location.proxy }};
+		include /etc/nginx/proxy.conf;
+		{% endif %}
+		{% if location.root is defined %}
+		root {{ location.root }};
+		{% endif %}		
+		{% if location.extraConfig is defined %}
+		{{ location.extraConfig }}
+		{% endif %}
+	}
+	{% endfor %}
+
+	{% if item.value.extraConfig is defined %}
+	{{ item.value.extraConfig }}
+	{% endif %}
+}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/oeffi-web/tasks/main.yml b/roles/oeffi-web/tasks/main.yml
@@ -0,0 +1,164 @@
+
+---
+
+
+# check 
+
+- fail: msg="This role currently only supports AlpineLinux!"
+  when:
+    - services.oeffi_web.enable is true
+    - ansible_distribution != "Alpine" 
+
+- fail: msg="Option 'services.oeffi_web.instances' has to be set!"
+  when:
+    - services.oeffi_web.enable is true
+    - services.oeffi_web.instances is not defined 
+
+
+# install it 
+
+- name: "[Alpine] Install package: oeffi-web"
+  apk:
+    name: oeffi-web
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is true
+
+
+# configure it 
+
+- name: "[OpenRC] Create service files" 
+  template: 
+    src: oeffi-web.initd.j2
+    dest: "/etc/init.d/oeffi-web{{item}}"
+    mode: 0755
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffi_web.enable is true
+
+- name: "Create directory: /var/log/oeffi-web"
+  file:
+    path: "/var/log/oeffi-web"
+    mode: 0755
+    state: directory
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is true
+
+- name: "Create logfiles in /var/log/oeffi-web"
+  file:
+    path: "/var/log/oeffi-web/{{item}}.log"
+    mode: 0777
+    state: touch
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is true
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/oeffi-web.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is true
+    - services.oeffi_web.nginx.enable is true
+
+
+# start it
+
+- name: "[OpenRC] Enable and restart service: oeffi-web"
+  service:
+    name: "oeffi-web{{item}}"
+    enabled: yes
+    state: restarted
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffi_web.enable is true
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffi_web.enable is true
+    - services.oeffi_web.nginx.enable is true
+
+
+# remove it
+
+- name: "[OpenRC] Disable and stop service: oeffi-web"
+  service:
+    name: "oeffi-web{{item}}"
+    enabled: no
+    state: stopped
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffi_web.enable is false
+
+- name: "[Alpine] Remove package: oeffi-web"
+  apk:
+    name: oeffi-web
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is false
+
+- name: "Delete files: /etc/init.d/oeffi-webX"
+  file:
+    path: "/etc/init.d/oeffi-web{{ item }}"
+    state: absent
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is false
+
+- name: "Delete directory: /var/log/oeffi-web"
+  file:
+    path: /var/log/oeffi-web
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is false
+
+- name: "Delete file: /etc/nginx/conf.d/oeffi-web.conf"
+  file:
+    path: /etc/nginx/conf.d/oeffi-web.conf
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffi_web.enable is false
diff --git a/roles/oeffi-web/templates/nginx-vhost.conf.j2 b/roles/oeffi-web/templates/nginx-vhost.conf.j2
@@ -0,0 +1,50 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+
+upstream oeffiweb {
+	least_conn;
+	server 127.0.0.1:5001;
+	server 127.0.0.1:5002;
+	server 127.0.0.1:5003;
+	server 127.0.0.1:5004;
+}
+
+{% if  services.oeffi_web.nginx.sslOnly is not defined or services.oeffi_web.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.oeffi_web.nginx.domain }};
+
+	location / {
+		proxy_pass http://oeffiweb;
+	}
+
+	location /assets {
+		root /usr/share/oeffi-web;
+	}
+}
+
+{% endif %}
+{% if services.oeffi_web.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.oeffi_web.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.oeffi_web.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.oeffi_web.nginx.domain }};
+
+	location / {
+		proxy_pass http://oeffiweb;
+	}
+
+	location /assets {
+		root /usr/share/oeffi-web;
+	}
+}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/oeffi-web/templates/oeffi-web.initd.j2 b/roles/oeffi-web/templates/oeffi-web.initd.j2
@@ -0,0 +1,35 @@
+#!/sbin/openrc-run
+
+#
+# !!! This file is managed by Ansible !!!
+#
+
+
+supervisor=supervise-daemon
+
+name="oeffi-web"
+description="fast and simple tripplanner for the web"
+
+PORT=500{{item}}
+PID_FILE=/run/oeffi-web/{{item}}
+CACHE_PATH=/var/lib/oeffisearch
+
+export PORT
+export CACHE_PATH
+
+command="/usr/bin/oeffi-web"
+command_user=oeffisearch:oeffisearch
+command_background=true
+pidfile=/run/oeffi-web/{{item}}
+directory="/usr/share/oeffi-web"
+output_log="/var/log/oeffi-web/{{item}}.log"
+
+depend() {
+	need net localmount
+	after firewall
+}
+
+start_pre() {
+	checkpath -d -o oeffisearch:oeffisearch /run/oeffi-web
+	checkpath -d -o oeffisearch:oeffisearch /var/lib/oeffisearch
+}+
\ No newline at end of file
diff --git a/roles/oeffisearch/tasks/main.yml b/roles/oeffisearch/tasks/main.yml
@@ -0,0 +1,163 @@
+---
+
+
+# check 
+
+- fail: msg="This role currently only supports AlpineLinux!"
+  when:
+    - services.oeffisearch.enable is true
+    - ansible_distribution != "Alpine" 
+
+- fail: msg="Option 'services.oeffisearch.instances' has to be set!"
+  when:
+    - services.oeffisearch.enable is true
+    - services.oeffisearch.instances is not defined 
+
+
+# install it 
+
+- name: "[Alpine] Install package: oeffisearch"
+  apk:
+    name: oeffisearch
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is true
+
+
+# configure it 
+
+- name: "[OpenRC] Create service files" 
+  template: 
+    src: oeffisearch.initd.j2
+    dest: "/etc/init.d/oeffisearch{{item}}"
+    mode: 0755
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffisearch.enable is true
+
+- name: "Create directory: /var/log/oeffisearch"
+  file:
+    path: "/var/log/oeffisearch"
+    mode: 0755
+    state: directory
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is true
+
+- name: "Create logfiles in /var/log/oeffisearch"
+  file:
+    path: "/var/log/oeffisearch/{{item}}.log"
+    mode: 0777
+    state: touch
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is true
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/oeffisearch.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is true
+    - services.oeffisearch.nginx.enable is true
+
+
+# start it
+
+- name: "[OpenRC] Enable and restart service: oeffisearch"
+  service:
+    name: "oeffisearch{{item}}"
+    enabled: yes
+    state: restarted
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffisearch.enable is true
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffisearch.enable is true
+    - services.oeffisearch.nginx.enable is true
+
+
+# remove it
+
+- name: "[OpenRC] Disable and stop service: oeffisearch"
+  service:
+    name: "oeffisearch{{item}}"
+    enabled: no
+    state: stopped
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_service_mgr == "openrc"
+    - services.oeffisearch.enable is false
+
+- name: "[Alpine] Remove package: oeffisearch"
+  apk:
+    name: oeffisearch
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is false
+
+- name: "Delete files: /etc/init.d/oeffisearchX"
+  file:
+    path: "/etc/init.d/oeffisearch{{ item }}"
+    state: absent
+  loop:
+    - 1
+    - 2
+    - 3
+    - 4
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is false
+
+- name: "Delete directory: /var/log/oeffisearch"
+  file:
+    path: /var/log/oeffisearch
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is false
+
+- name: "Delete file: /etc/nginx/conf.d/oeffisearch.conf"
+  file:
+    path: /etc/nginx/conf.d/oeffisearch.conf
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.oeffisearch.enable is false
diff --git a/roles/oeffisearch/templates/nginx-vhost.conf.j2 b/roles/oeffisearch/templates/nginx-vhost.conf.j2
@@ -0,0 +1,51 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+upstream oeffisearch {
+	least_conn;
+	server 127.0.0.1:8081;
+	server 127.0.0.1:8082;
+	server 127.0.0.1:8083;
+	server 127.0.0.1:8084;
+}
+
+{% if  services.oeffisearch.nginx.sslOnly is not defined or services.oeffisearch.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.oeffisearch.nginx.domain }};
+
+	location / {
+		try_files $uri $uri/ @api;
+		root /usr/share/oeffisearch;
+	}
+
+	location @api {
+		proxy_pass http://oeffisearch;
+	}
+}
+
+{% endif %}
+{% if services.oeffisearch.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.oeffisearch.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.oeffisearch.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.oeffisearch.nginx.domain }};
+
+	location / {
+		try_files $uri $uri/ @api;
+		root /usr/share/oeffisearch;
+	}
+
+	location @api {
+		proxy_pass http://oeffisearch;
+	}
+}
+{% endif %}+
\ No newline at end of file
diff --git a/roles/oeffisearch/templates/oeffisearch.initd.j2 b/roles/oeffisearch/templates/oeffisearch.initd.j2
@@ -0,0 +1,34 @@
+#!/sbin/openrc-run
+
+#
+# !!! This file is managed by Ansible !!!
+#
+
+supervisor=supervise-daemon
+
+name="oeffisearch"
+description="fast and simple tripplanner for the web"
+
+PORT=808{{item}}
+PID_FILE=/run/oeffisearch/{{item}}
+CACHE_PATH=/var/lib/oeffisearch
+
+export PORT
+export CACHE_PATH
+
+command="/usr/bin/oeffisearch"
+command_user="oeffisearch:oeffisearch"
+command_background=true
+pidfile=/run/oeffisearch/{{item}}
+directory="/var/lib/oeffisearch"
+output_log="/var/log/oeffisearch/{{item}}.log"
+
+depend() {
+	need net localmount
+	after firewall
+}
+
+start_pre() {
+	checkpath -d -o oeffisearch:oeffisearch /run/oeffisearch
+	checkpath -d -o oeffisearch:oeffisearch /var/lib/oeffisearch
+}+
\ No newline at end of file
diff --git a/roles/passwordstore/tasks/main.yml b/roles/passwordstore/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+
+- name: get stuff from password store
+  set_fact:
+    "pass_{{ item.value.name }}": "{{ lookup('community.general.passwordstore', item.key + ' returnall=true') }}"
+  loop: "{{ lookup('dict', passwordstore, wantlist=True) }}"
diff --git a/roles/pleroma/meta/main.yml b/roles/pleroma/meta/main.yml
@@ -0,0 +1,5 @@
+---
+
+dependencies:
+  - role: postgresql
+  +
\ No newline at end of file
diff --git a/roles/pleroma/tasks/checks.yml b/roles/pleroma/tasks/checks.yml
@@ -0,0 +1,47 @@
+---
+
+- fail: msg="This role currently only supports AlpineLinux!"
+  when:
+    - ansible_distribution != "Alpine" 
+
+- fail: msg="This role depends on postgres!"
+  when:
+    - services.postgresql.enable is not defined or services.postgresql.enable is not true
+
+
+- fail: msg="Option 'services.pleroma.configFile' has to be set!"
+  when:
+    - services.pleroma.configFile is not defined 
+
+- fail: msg="Option 'services.pleroma.secretsContent' has to be set!"
+  when:
+    - services.pleroma.secretsContent is not defined 
+
+
+- fail: msg="Option 'services.pleroma.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.pleroma.nginx.enable is defined
+    - services.pleroma.nginx.enable is true
+    - services.pleroma.nginx.domain is not defined
+
+- fail: msg="Option 'services.pleroma.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.pleroma.nginx.enable is defined
+    - services.pleroma.nginx.enable is true
+    - services.pleroma.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.pleroma.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.pleroma.nginx.enable is defined
+    - services.pleroma.nginx.enable is true
+    - services.pleroma.nginx.ssl.enable is defined
+    - services.pleroma.nginx.ssl.enable is true
+    - services.pleroma.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.pleroma.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.pleroma.nginx.enable is defined
+    - services.pleroma.nginx.enable is true
+    - services.pleroma.nginx.ssl.enable is defined
+    - services.pleroma.nginx.ssl.enable is true
+    - services.pleroma.nginx.ssl.privkey is not defined
diff --git a/roles/pleroma/tasks/configure.yml b/roles/pleroma/tasks/configure.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "Copy config-file to: /etc/pleroma/config.exs" 
+  copy: 
+    src: "{{ services.pleroma.configFile }}"
+    dest: /etc/pleroma/config.exs
+    mode: 0755
+    owner: pleroma
+    group: pleroma
+
+- name: "Copy secretsContent to: /var/lib/pleroma/secret.exs"
+  copy:
+    content: "{{ services.pleroma.secretsContent }}"
+    dest:    /var/lib/pleroma/secret.exs
+    owner:   pleroma
+    group:   pleroma
+    mode:    0700
diff --git a/roles/pleroma/tasks/install.yml b/roles/pleroma/tasks/install.yml
@@ -0,0 +1,9 @@
+---
+
+- name: "[Alpine] Install package: pleroma"
+  apk:
+    name: pleroma
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
diff --git a/roles/pleroma/tasks/main.yml b/roles/pleroma/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+
+- include: checks.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is true
+
+- include: install.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is true
+
+- include: configure.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is true
+
+- include: start.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is true
+
+- include: nginx.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is true
+    - services.pleroma.nginx is defined
+    - services.pleroma.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.pleroma.enable is defined
+    - services.pleroma.enable is false
+
+
+
+
diff --git a/roles/pleroma/tasks/nginx.yml b/roles/pleroma/tasks/nginx.yml
@@ -0,0 +1,14 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template:
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/pleroma.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
diff --git a/roles/pleroma/tasks/remove.yml b/roles/pleroma/tasks/remove.yml
diff --git a/roles/pleroma/tasks/start.yml b/roles/pleroma/tasks/start.yml
@@ -0,0 +1,9 @@
+---
+
+- name: "[OpenRC] Enable and restart service: pleroma"
+  service:
+    name: pleroma
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
diff --git a/roles/pleroma/templates/nginx-vhost.conf.j2 b/roles/pleroma/templates/nginx-vhost.conf.j2
@@ -0,0 +1,57 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if  services.pleroma.nginx.sslOnly is not defined or services.pleroma.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.pleroma.nginx.domain }};
+
+	gzip_vary on;
+	gzip_proxied any;
+	gzip_comp_level 6;
+   	gzip_buffers 16 8k;
+	gzip_http_version 1.1;
+	gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
+
+	client_max_body_size 150m;
+	ignore_invalid_headers off;
+
+
+	location / {
+		proxy_pass http://localhost:4000/;
+		include /etc/nginx/proxy.conf;
+	}
+}
+
+{% endif %}
+{% if services.pleroma.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.pleroma.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.pleroma.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.pleroma.nginx.domain }};
+
+	gzip_vary on;
+	gzip_proxied any;
+	gzip_comp_level 6;
+   	gzip_buffers 16 8k;
+	gzip_http_version 1.1;
+	gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
+
+	client_max_body_size 150m;
+	ignore_invalid_headers off;
+
+
+	location / {
+		proxy_pass http://localhost:4000/;
+		include /etc/nginx/proxy.conf;
+	}
+}
+{% endif %}
diff --git a/roles/postgresql/tasks/main.yml b/roles/postgresql/tasks/main.yml
@@ -0,0 +1,84 @@
+---
+
+- name: "[Alpine] Install package: postgresql"
+  apk:
+    name: postgresql postgresql-contrib
+    state: present
+    update_cache: yes  
+  when:
+    - ansible_distribution == "Alpine" 
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is true
+
+- name: "[Archlinux] Install package: postgresql"
+  pacman:
+    name: postgresql postgresql-contrib
+    state: present
+    update_cache: yes  
+  when:
+    - ansible_distribution == "Archlinux" 
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is true
+
+
+- name: "[OpenRC] Enable and start service: postgresql"
+  service:
+    name: postgresql
+    enabled: yes
+    state: started
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is true
+
+- name: "[systemd] Enable and start service: postgresql"
+  systemd:
+    name: postgresql
+    enabled: yes
+    state: started
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is true
+
+
+
+
+
+- name: "[OpenRC] Disable and stop service: postgresql"
+  service:
+    name: postgresql
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is false
+
+- name: "[systemd] Disable and stop service: postgresql"
+  systemd:
+    name: postgresql
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is false
+
+- name: "[Alpine] Remove package: postgresql"
+  apk:
+    name: postgresql postgresql-contrib
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is false
+
+- name: "[Archlinux] Remove package: postgresql"
+  pacman:
+    name: postgresql postgresql-contrib
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+    - services.postgresql.enable is defined
+    - services.postgresql.enable is false
diff --git a/roles/prometheus/tasks/checks.yml b/roles/prometheus/tasks/checks.yml
@@ -0,0 +1,34 @@
+---
+
+- fail: msg="Option 'services.prometheus.config' has to be set!"
+  when:
+    - services.prometheus.config is not defined
+
+
+- fail: msg="Option 'services.prometheus.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.prometheus.nginx.enable is defined
+    - services.prometheus.nginx.enable is true
+    - services.prometheus.nginx.domain is not defined
+
+- fail: msg="Option 'services.prometheus.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.prometheus.nginx.enable is defined
+    - services.prometheus.nginx.enable is true
+    - services.prometheus.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.prometheus.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.prometheus.nginx.enable is defined
+    - services.prometheus.nginx.enable is true
+    - services.prometheus.nginx.ssl.enable is defined
+    - services.prometheus.nginx.ssl.enable is true
+    - services.prometheus.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.prometheus.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.prometheus.nginx.enable is defined
+    - services.prometheus.nginx.enable is true
+    - services.prometheus.nginx.ssl.enable is defined
+    - services.prometheus.nginx.ssl.enable is true
+    - services.prometheus.nginx.ssl.privkey is not defined
diff --git a/roles/prometheus/tasks/configure.yml b/roles/prometheus/tasks/configure.yml
@@ -0,0 +1,6 @@
+---
+
+- name: "Generate config: /etc/prometheus/prometheus.yml" 
+  copy:
+    content: "{{ services.prometheus.config | to_nice_yaml }}"
+    dest: /etc/prometheus/prometheus.yml
diff --git a/roles/prometheus/tasks/install.yml b/roles/prometheus/tasks/install.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[Alpine] Install package: prometheus"
+  apk:
+    name: prometheus
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: prometheus"
+  pacman:
+    name: prometheus
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux" 
diff --git a/roles/prometheus/tasks/main.yml b/roles/prometheus/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- include: checks.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is true
+
+- include: install.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is true
+
+- include: configure.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is true
+
+- include: start.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is true
+
+- include: nginx.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is true
+    - services.prometheus.nginx is defined
+    - services.prometheus.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.prometheus.enable is defined
+    - services.prometheus.enable is false
+
+
+
+
+
diff --git a/roles/prometheus/tasks/nginx.yml b/roles/prometheus/tasks/nginx.yml
@@ -0,0 +1,23 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template:
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/prometheus.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"+
\ No newline at end of file
diff --git a/roles/prometheus/tasks/remove.yml b/roles/prometheus/tasks/remove.yml
@@ -0,0 +1,39 @@
+---
+
+- name: "[OpenRC] Disable and stop service: prometheus"
+  service:
+    name: prometheus
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: prometheus"
+  systemd:
+    name: prometheus
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+
+- name: "[Alpine] Remove package: prometheus"
+  apk:
+    name: prometheus
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Remove package: prometheus"
+  pacman:
+    name: prometheus
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+
+- name: "Delete leftovers"
+  file:
+    path: "{{item}}"
+    state: absent
+  with_items:
+    - /etc/nginx/conf.d/prometheus.conf
+    - /etc/prometheus+
\ No newline at end of file
diff --git a/roles/prometheus/tasks/start.yml b/roles/prometheus/tasks/start.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[OpenRC] Restart and enable service: prometheus"
+  service:
+    name: prometheus
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart and enable service: prometheus"
+  systemd:
+    name: prometheus
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/prometheus/templates/nginx-vhost.conf.j2 b/roles/prometheus/templates/nginx-vhost.conf.j2
@@ -0,0 +1,33 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.prometheus.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.prometheus.nginx.domain }};
+
+	location / {
+		proxy_pass http://127.0.0.1:9090;
+	}
+}
+
+{% endif %}
+{% if services.prometheus.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.prometheus.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.prometheus.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.prometheus.nginx.domain }};
+
+	location / {
+		proxy_pass http://127.0.0.1:9090;
+	}
+}
+{% endif %}
diff --git a/roles/radicale/tasks/checks.yml b/roles/radicale/tasks/checks.yml
@@ -0,0 +1,44 @@
+
+- fail: msg="Option 'services.radicale.configFile' has to be set!"
+  when:
+    - services.radicale.configFile is not defined
+
+- fail: msg="Option 'services.radicale.users' has to be set!"
+  when:
+    - services.radicale.users is not defined
+
+- fail: msg="Option 'services.radicale.configFile' has to be set!"
+  when:
+    - services.radicale.configFile is not defined
+
+
+
+- fail: msg="Option 'services.radicale.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.radicale.nginx.enable is defined
+    - services.radicale.nginx.enable is true
+    - services.radicale.nginx.domain is not defined
+
+- fail: msg="Option 'services.radicale.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.radicale.nginx.enable is defined
+    - services.radicale.nginx.enable is true
+    - services.radicale.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.radicale.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.radicale.nginx.enable is defined
+    - services.radicale.nginx.enable is true
+    - services.radicale.nginx.ssl.enable is defined
+    - services.radicale.nginx.ssl.enable is true
+    - services.radicale.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.radicale.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.radicale.nginx.enable is defined
+    - services.radicale.nginx.enable is true
+    - services.radicale.nginx.ssl.enable is defined
+    - services.radicale.nginx.ssl.enable is true
+    - services.radicale.nginx.ssl.privkey is not defined
+
+#todo: check that certs exists+
\ No newline at end of file
diff --git a/roles/radicale/tasks/configure.yml b/roles/radicale/tasks/configure.yml
@@ -0,0 +1,17 @@
+---
+
+- name: copy radicale-config to destination host 
+  copy: 
+    src: "{{ services.radicale.configFile }}"
+    dest: /etc/radicale/config
+    mode: 0640
+    owner: root
+    group: radicale
+
+- name: "Create file: /etc/radicale/users"
+  copy: 
+    content: "{{ services.radicale.users }}"
+    dest: /etc/radicale/users
+    mode: 0640
+    owner: root
+    group: radicale
diff --git a/roles/radicale/tasks/install.yml b/roles/radicale/tasks/install.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[Alpine] Install package: radicale"
+  apk:
+    name: radicale
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: radicale"
+  pacman:
+    name: radicale
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux" 
diff --git a/roles/radicale/tasks/main.yml b/roles/radicale/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+
+- include: checks.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is true
+
+- include: install.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is true
+
+- include: configure.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is true
+
+- include: start.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is true
+
+- include: nginx.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is true
+    - services.radicale.nginx.enable is defined
+    - services.radicale.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.radicale.enable is defined
+    - services.radicale.enable is false
diff --git a/roles/radicale/tasks/nginx.yml b/roles/radicale/tasks/nginx.yml
@@ -0,0 +1,23 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template:
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/radicale.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/radicale/tasks/remove.yml b/roles/radicale/tasks/remove.yml
@@ -0,0 +1,39 @@
+---
+
+- name: "[OpenRC] Disable and stop service: radicale"
+  service:
+    name: radicale
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: radicale"
+  systemd:
+    name: radicale
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+
+- name: "[Alpine] Remove package: radicale"
+  apk:
+    name: radicale
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Remove package: radicale"
+  pacman:
+    name: radicale
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+
+- name: "Delete leftovers"
+  file:
+    path: "{{item}}"
+    state: absent
+  with_items:
+    - /etc/nginx/conf.d/radicale.conf
+    - /etc/radicale+
\ No newline at end of file
diff --git a/roles/radicale/tasks/start.yml b/roles/radicale/tasks/start.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[OpenRC] Enable and restart service: radicale"
+  service:
+    name: radicale
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Enable and restart service: radicale"
+  systemd:
+    name: radicale
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/radicale/templates/nginx-vhost.conf.j2 b/roles/radicale/templates/nginx-vhost.conf.j2
@@ -0,0 +1,39 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if  services.radicale.nginx.sslOnly is not defined or services.radicale.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.radicale.nginx.domain }};
+
+	location / {
+		proxy_pass       http://localhost:5232/; # The / is important!
+		proxy_set_header Host $host;
+		proxy_set_header X-Real-IP $remote_addr;
+		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+	}
+}
+
+{% endif %}
+{% if services.radicale.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.radicale.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.radicale.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.radicale.nginx.domain }};
+
+	location / {
+		proxy_pass       http://localhost:5232/; # The / is important!
+		proxy_set_header Host $host;
+		proxy_set_header X-Real-IP $remote_addr;
+		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+	}
+}
+{% endif %}
diff --git a/roles/rest-server/tasks/checks.yml b/roles/rest-server/tasks/checks.yml
@@ -0,0 +1,45 @@
+---
+
+- fail: msg="This Role only works on Archlinux when Option 'system.enableOwnRepos' is true!"
+  when:
+    - ansible_distribution == "Archlinux" 
+    - system.enableOwnRepos is false
+
+- fail: msg="Option 'services.rest_server.user' has to be set!"
+  when:
+    - services.rest_server.user is not defined
+
+- fail: msg="Option 'services.rest_server.port' has to be set!"
+  when:
+    - services.rest_server.port is not defined
+
+#todo: check that user exists
+
+
+- fail: msg="Option 'services.rest_server.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.rest_server.nginx.enable is defined
+    - services.rest_server.nginx.enable is true
+    - services.rest_server.nginx.domain is not defined
+
+- fail: msg="Option 'services.rest_server.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.rest_server.nginx.enable is defined
+    - services.rest_server.nginx.enable is true
+    - services.rest_server.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.rest_server.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.rest_server.nginx.enable is defined
+    - services.rest_server.nginx.enable is true
+    - services.rest_server.nginx.ssl.enable is defined
+    - services.rest_server.nginx.ssl.enable is true
+    - services.rest_server.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.rest_server.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.rest_server.nginx.enable is defined
+    - services.rest_server.nginx.enable is true
+    - services.rest_server.nginx.ssl.enable is defined
+    - services.rest_server.nginx.ssl.enable is true
+    - services.rest_server.nginx.ssl.privkey is not defined
diff --git a/roles/rest-server/tasks/configure.yml b/roles/rest-server/tasks/configure.yml
@@ -0,0 +1,24 @@
+---
+
+- name: "Create directory: /var/lib/rest-server"
+  file:
+    path: /var/lib/rest-server
+    state: directory
+    owner: "{{ services.rest_server.user }}"
+    group: "{{ services.rest_server.user }}"
+
+- name: "[OpenRC] Create service file" 
+  template:
+    src: openrc-service.j2
+    dest: /etc/init.d/rest-server
+    mode: 0755
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Create service file" 
+  template:
+    src: systemd-service.j2
+    dest: /etc/systemd/system/rest-server.service
+    mode: 0755
+  when:
+    - ansible_service_mgr == "systemd"+
\ No newline at end of file
diff --git a/roles/rest-server/tasks/install.yml b/roles/rest-server/tasks/install.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[Alpine] Install package: rest-server"
+  apk:
+    name: rest-server
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: rest-server"
+  pacman:
+    name: rest-server
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux" 
diff --git a/roles/rest-server/tasks/main.yml b/roles/rest-server/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+
+- include: checks.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is true
+
+- include: install.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is true
+
+- include: configure.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is true
+
+- include: start.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is true
+
+- include: nginx.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is true
+    - services.rest_server.nginx is defined
+    - services.rest_server.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.rest_server.enable is defined
+    - services.rest_server.enable is false
+
diff --git a/roles/rest-server/tasks/nginx.yml b/roles/rest-server/tasks/nginx.yml
@@ -0,0 +1,33 @@
+---
+
+- name: "[nginx] Place password-file: /etc/nginx/passwd/rest-server"
+  copy:
+    content: "{{ services.rest_server.nginx.password }}"
+    dest: /etc/nginx/passwd/rest-server
+    owner: nginx
+    group: nginx
+    mode: 0700
+  when:
+    - services.rest_server.nginx.password is defined
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/rest-server.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/rest-server/tasks/remove.yml b/roles/rest-server/tasks/remove.yml
@@ -0,0 +1,43 @@
+---
+
+- name: "[OpenRC] Disable and stop service: rest-server"
+  service:
+    name: rest-server
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: rest-server"
+  systemd:
+    name: rest-server
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "systemd"
+
+
+- name: "[Alpine] Remove package: rest-server"
+  apk:
+    name: rest-server
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Remove package: rest-server"
+  pacman:
+    name: rest-server
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+
+
+- name: "Delete leftovers"
+  file:
+    path: "{{item}}"
+    state: absent
+  with_items:
+    - /etc/nginx/passwd/rest-server
+    - /etc/nginx/conf.d/rest-server.conf
+    - /etc/systemd/system/rest-server.service
+    - /etc/init.d/rest-server
\ No newline at end of file
diff --git a/roles/rest-server/tasks/start.yml b/roles/rest-server/tasks/start.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[OpenRC] Enable and start service: rest-server"
+  service:
+    name: rest-server
+    enabled: yes
+    state: started
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Enable and start service: rest-server"
+  systemd:
+    name: rest-server
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/rest-server/templates/nginx-vhost.conf.j2 b/roles/rest-server/templates/nginx-vhost.conf.j2
@@ -0,0 +1,45 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if  services.rest_server.nginx.sslOnly is not defined or services.rest_server.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.rest_server.nginx.domain }};
+
+	location / {
+		proxy_pass http://127.0.0.1:{{ services.rest_server.port }}/;
+		client_max_body_size 500M;
+		include /etc/nginx/proxy.conf;
+	}
+}
+
+{% endif %}
+{% if services.rest_server.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.rest_server.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.rest_server.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.rest_server.nginx.domain }};
+
+	{% if services.rest_server.nginx.password is defined %}
+	auth_basic           "hello";
+	auth_basic_user_file /etc/nginx/passwd/rest-server; 
+	{% endif %}
+
+	location / {
+		proxy_pass http://127.0.0.1:{{ services.rest_server.port }}/;
+		client_max_body_size 500M;
+		include /etc/nginx/proxy.conf;
+	}
+}
+{% endif %}
+
diff --git a/roles/rest-server/templates/openrc-service.j2 b/roles/rest-server/templates/openrc-service.j2
@@ -0,0 +1,20 @@
+#!/sbin/openrc-run
+
+#
+# !!! This file is managed by Ansible !!!
+#
+
+supervisor=supervise-daemon
+
+name="rest-server"
+description="Rest Server is a high performance HTTP server that implements restic's REST backend API."
+
+command="/usr/bin/rest-server"
+command_args="--append-only --listen 127.0.0.1:{{ services.rest_server.port }} --no-auth --path /var/lib/rest-server --prometheus"
+command_user="{{ services.rest_server.user }}:{{ services.rest_server.user }}"
+directory="/var/lib/rest-server"
+
+depend() {
+	need net localmount
+	after firewall
+}
diff --git a/roles/rest-server/templates/systemd-service.j2 b/roles/rest-server/templates/systemd-service.j2
@@ -0,0 +1,26 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+[Unit]
+Description=Rest Server
+After=syslog.target
+After=network.target
+
+[Service]
+Type=simple
+User={{ services.rest_server.user }}
+Group={{ services.rest_server.user }}
+ExecStart=/usr/bin/rest-server --append-only --listen 127.0.0.1:{{ services.rest_server.port }} --no-auth --path /var/lib/rest-server --prometheus
+Restart=always
+RestartSec=5
+
+# Optional security enhancements
+NoNewPrivileges=yes
+PrivateTmp=yes
+ProtectSystem=strict
+ProtectHome=yes
+ReadWritePaths=/var/lib/rest-server
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/roles/synapse/meta/main.yaml b/roles/synapse/meta/main.yaml
@@ -0,0 +1,4 @@
+---
+
+dependencies:
+  - role: postgresql
diff --git a/roles/synapse/tasks/checks.yml b/roles/synapse/tasks/checks.yml
@@ -0,0 +1,47 @@
+---
+
+- fail: msg="This role depends on postgres!"
+  when:
+    - services.postgresql.enable is not defined or services.postgresql.enable is not true
+
+- fail: msg="Option 'services.synapse.configPath' has to be set!"
+  when:
+    - services.synapse.configPath is not defined
+
+- fail: msg="Option 'services.synapse.webClient.configFile' has to be set when 'services.synapse.webClient.enable' is true!"
+  when:
+    - services.synapse.webClient.enable is defined
+    - services.synapse.webClient.enable is true
+    - services.synapse.webClient.configFile is not defined
+
+
+
+- fail: msg="Option 'services.synapse.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.synapse.nginx.enable is defined
+    - services.synapse.nginx.enable is true
+    - services.synapse.nginx.domain is not defined
+
+- fail: msg="Option 'services.synapse.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.synapse.nginx.enable is defined
+    - services.synapse.nginx.enable is true
+    - services.synapse.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.synapse.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.synapse.nginx.enable is defined
+    - services.synapse.nginx.enable is true
+    - services.synapse.nginx.ssl.enable is defined
+    - services.synapse.nginx.ssl.enable is true
+    - services.synapse.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.synapse.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.synapse.nginx.enable is defined
+    - services.synapse.nginx.enable is true
+    - services.synapse.nginx.ssl.enable is defined
+    - services.synapse.nginx.ssl.enable is true
+    - services.synapse.nginx.ssl.privkey is not defined
+
+#todo: check that certs exists
\ No newline at end of file
diff --git a/roles/synapse/tasks/configure.yml b/roles/synapse/tasks/configure.yml
@@ -0,0 +1,39 @@
+---
+
+- name: "Copy configs to: /etc/synapse"
+  copy:
+    src: "{{ services.synapse.configPath }}"
+    dest: "/etc/synapse"
+    mode: 0755
+    owner: synapse
+    group: synapse
+
+- name: "[Alpine] Copy web-config to: /etc/riot-web/config.json"
+  copy:
+    src: "{{ services.synapse.webClient.configFile }}"
+    dest: "/etc/riot-web/config.json"
+    mode: 0644
+  when:
+    - ansible_distribution == "Alpine"
+    - services.synapse.webClient.enable is defined
+    - services.synapse.webClient.enable is true
+
+- name: "[Archlinux] Copy web-config to: /etc/webapps/element/config.json"
+  copy:
+    src: "{{ services.synapse.webClient.configFile }}"
+    dest: "/etc/webapps/element/config.json"
+    mode: 0644
+  when:
+    - ansible_distribution == "Archlinux" 
+    - services.synapse.webClient.enable is defined
+    - services.synapse.webClient.enable is true
+
+- name: "[Alpine] Create directory: /var/log/synapse"
+  file: 
+    path: "/var/log/synapse"
+    state: directory
+    mode: 0755
+    owner: synapse
+    group: synapse
+  when:
+    - ansible_distribution == "Alpine" 
diff --git a/roles/synapse/tasks/install.yml b/roles/synapse/tasks/install.yml
@@ -0,0 +1,39 @@
+---
+
+- name: "[Alpine] Install package: synapse"
+  apk:
+    name: synapse
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Alpine] Install package: riot-web"
+  apk:
+    name: riot-web
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Alpine" 
+    - services.synapse.webClient.enable is defined
+    - services.synapse.webClient.enable is true
+
+
+
+- name: "[Archlinux] Install package: matrix-synapse"
+  pacman:
+    name: matrix-synapse
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux" 
+
+- name: "[Archlinux] Install package: element-web"
+  pacman:
+    name: element-web
+    state: present
+    update_cache: yes
+  when:
+    - ansible_distribution == "Archlinux" 
+    - services.synapse.webClient.enable is defined
+    - services.synapse.webClient.enable is true
diff --git a/roles/synapse/tasks/main.yml b/roles/synapse/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+
+- include: checks.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is true
+
+- include: install.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is true
+
+- include: configure.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is true
+
+- include: start.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is true
+
+- include: nginx.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is true
+    - services.synapse.nginx is defined
+    - services.synapse.nginx.enable is true
+
+- include: remove.yml
+  when:
+    - services.synapse.enable is defined
+    - services.synapse.enable is false
+
diff --git a/roles/synapse/tasks/nginx.yml b/roles/synapse/tasks/nginx.yml
@@ -0,0 +1,24 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template: 
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/synapse.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
diff --git a/roles/synapse/tasks/remove.yml b/roles/synapse/tasks/remove.yml
@@ -0,0 +1,40 @@
+---
+
+- name: "[OpenRC] Disable and stop service: synapse"
+  service:
+    name: synapse
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: synapse"
+  systemd:
+    name: synapse
+    enabled: no
+    state: stopped
+  when:
+    - ansible_service_mgr == "systemd"
+
+- name: "[Alpine] Remove package: synapse, riot-web"
+  apk:
+    name:
+      - synapse
+      - riot-web
+    state: absent
+  when:
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Remove package: matrix-synapse, element-web"
+  pacman:
+    name:
+      - matrix-synapse
+      - element-web
+    state: absent
+  when:
+    - ansible_distribution == "Archlinux" 
+
+- name: "Delete leftovers"
+  file:
+    path: "{{item}}"
+    state: absent
+  with_items:
+    - /etc/nginx/conf.d/synapse.conf
+    - /etc/synapse
+    - /etc/webapps/element
\ No newline at end of file
diff --git a/roles/synapse/tasks/start.yml b/roles/synapse/tasks/start.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[OpenRC] Enable and restart service: synapse"
+  service:
+    name: synapse
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Enable and restart service: synapse"
+  systemd:
+    name: synapse
+    enabled: yes
+    state: restarted
+  when:
+    - ansible_service_mgr == "systemd"
\ No newline at end of file
diff --git a/roles/synapse/templates/nginx-vhost.conf.j2 b/roles/synapse/templates/nginx-vhost.conf.j2
@@ -0,0 +1,73 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.synapse.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.synapse.nginx.domain }};
+
+	{% if services.synapse.webClient.enable is defined and services.synapse.webClient.enable is true %}
+	location /_matrix {
+		proxy_pass http://127.0.0.1:8008;
+		proxy_set_header X-Forwarded-For $remote_addr;
+		client_max_body_size 100M;
+	}
+
+	{% if ansible_distribution == "Alpine" %}
+	location / {
+		root /usr/share/webapps/riot-web;
+	}
+	{% else %}
+	location / {
+		root /usr/share/webapps/element;
+	}
+	{% endif %}
+	{% else %}
+	location / {
+		proxy_pass http://127.0.0.1:8008;
+		proxy_set_header X-Forwarded-For $remote_addr;
+		client_max_body_size 100M;
+	}
+	{% endif %}
+}
+
+{% endif %}
+{% if services.synapse.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.synapse.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.synapse.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.synapse.nginx.domain }};
+
+	{% if services.synapse.webClient.enable is defined and services.synapse.webClient.enable is true %}
+	location /_matrix {
+		proxy_pass http://127.0.0.1:8008;
+		proxy_set_header X-Forwarded-For $remote_addr;
+		client_max_body_size 100M;
+	}
+
+	{% if ansible_distribution == "Alpine" %}
+	location / {
+		root /usr/share/webapps/riot-web;
+	}
+	{% else %}
+	location / {
+		root /usr/share/webapps/element;
+	}
+	{% endif %}
+	{% else %}
+	location / {
+		proxy_pass http://127.0.0.1:8008;
+		proxy_set_header X-Forwarded-For $remote_addr;
+		client_max_body_size 100M;
+	}
+	{% endif %}
+}
+{% endif %}
diff --git a/alpine/config-files/awall/syncthing.json b/roles/syncthing/files/awall-rule.json
diff --git a/roles/syncthing/tasks/checks.yml b/roles/syncthing/tasks/checks.yml
@@ -0,0 +1,36 @@
+---
+
+- fail: msg="Option 'services.syncthing.user' has to be set!"
+  when:
+    - services.syncthing.user is not defined
+
+#todo: check that user exists
+
+
+- fail: msg="Option 'services.syncthing.nginx.domain' has to be set when using nginx!"
+  when:
+    - services.syncthing.nginx.enable is defined
+    - services.syncthing.nginx.enable is true
+    - services.syncthing.nginx.domain is not defined
+
+- fail: msg="Option 'services.syncthing.nginx.sslOnly' has to be set when using nginx!"
+  when:
+    - services.syncthing.nginx.enable is defined
+    - services.syncthing.nginx.enable is true
+    - services.syncthing.nginx.sslOnly is not defined
+
+- fail: msg="Option 'services.syncthing.nginx.ssl.cert' has to be set when using nginx with ssl!"
+  when:
+    - services.syncthing.nginx.enable is defined
+    - services.syncthing.nginx.enable is true
+    - services.syncthing.nginx.ssl.enable is defined
+    - services.syncthing.nginx.ssl.enable is true
+    - services.syncthing.nginx.ssl.cert is not defined
+
+- fail: msg="Option 'services.syncthing.nginx.ssl.privkey' has to be set when using nginx with ssl!"
+  when:
+    - services.syncthing.nginx.enable is defined
+    - services.syncthing.nginx.enable is true
+    - services.syncthing.nginx.ssl.enable is defined
+    - services.syncthing.nginx.ssl.enable is true
+    - services.syncthing.nginx.ssl.privkey is not defined
diff --git a/roles/syncthing/tasks/configure.yml b/roles/syncthing/tasks/configure.yml
@@ -0,0 +1,28 @@
+---
+
+- name: "[OpenRC] Create service file: syncthing-{{ services.syncthing.user }}"
+  template: 
+    src: openrc-service.j2
+    dest: "/etc/init.d/syncthing-{{ services.syncthing.user }}"
+    mode: 0755
+  when: 
+    - ansible_service_mgr == "openrc"
+
+
+- name: "[awall] Create rule for: syncthing"
+  copy:
+    src: awall-rule.json
+    dest: /etc/awall/optional/syncthing.json
+    validate: jq '.' %s
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[awall] Enable rule for: syncthing"
+  awall:
+    name: syncthing
+    state: enabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
diff --git a/roles/syncthing/tasks/install.yml b/roles/syncthing/tasks/install.yml
@@ -0,0 +1,17 @@
+---
+
+- name: "[Alpine] Install package: syncthing"
+  apk:
+    name: syncthing
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+
+- name: "[Archlinux] Install package: syncthing"
+  pacman:
+    name: syncthing
+    state: present
+    update_cache: yes
+  when: 
+    - ansible_distribution == "Archlinux" 
diff --git a/roles/syncthing/tasks/main.yml b/roles/syncthing/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+
+- import_tasks: checks.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is true
+
+- import_tasks: install.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is true
+
+- import_tasks: configure.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is true
+
+
+- import_tasks: start.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is true
+
+- import_tasks: nginx.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is true
+    - services.syncthing.nginx.enable is defined
+    - services.syncthing.nginx.enable is true
+
+- import_tasks: remove.yml
+  when:
+    - services.syncthing.enable is defined
+    - services.syncthing.enable is false
diff --git a/roles/syncthing/tasks/nginx.yml b/roles/syncthing/tasks/nginx.yml
@@ -0,0 +1,20 @@
+---
+
+- name: "[nginx] Create vhost" 
+  template:
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/syncthing.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+  when:
+    - services.syncthing.enable is true
+    - services.syncthing.nginx is defined
+    - services.syncthing.nginx.enable is true
+
+- name: "[systemd] Restart service: nginx"
+  systemd:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/syncthing/tasks/remove.yml b/roles/syncthing/tasks/remove.yml
@@ -0,0 +1,84 @@
+---
+
+- name: "Find uid of user: {{ services.syncthing.user }}"
+  command: "id -u {{ services.syncthing.user }}"
+  register: userId
+  check_mode: no # Run even in check mode, otherwise the playbook fails with --check.
+  changed_when: false
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.syncthing.user is defined
+
+- name: "Determine XDG_RUNTIME_DIR"
+  set_fact:
+    xdg_runtime_dir: "/run/user/{{ userId.stdout }}"
+  changed_when: false
+  when:
+    - ansible_service_mgr == "systemd"
+
+
+
+- name: "[awall] Disable rule for: syncthing"
+  awall:
+    name: syncthing
+    state: disabled
+    activate: yes
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+- name: "[awall] Delete rule for: syncthing"
+  file:
+    path: /etc/awall/optional/syncthing.json
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - network.awall.enable is true
+
+
+- name: "[OpenRC] Disable and stop service: syncthing-{{ services.syncthing.user }}"
+  service:
+    name: "syncthing-{{ services.syncthing.user }}"
+    enabled: no
+    state: stopped
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Disable and stop service: syncthing"
+  environment:
+    XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
+  systemd:
+    name: syncthing
+    scope: user
+    enabled: no
+    state: stopped
+  become: true
+  become_user: "{{ services.syncthing.user }}"
+  when: 
+    - ansible_service_mgr == "systemd"
+
+
+- name: "[Alpine] Remove package: syncthing"
+  apk:
+    name: syncthing
+    state: absent
+  when: 
+    - ansible_distribution == "Alpine" 
+    - services.syncthing.enable is false
+
+- name: "[Archlinux] Remove package: syncthing"
+  pacman:
+    name: syncthing
+    state: absent
+  when: 
+    - ansible_distribution == "Archlinux" 
+    - services.syncthing.enable is false
+
+
+- name: "Delete leftovers"
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/etc/init.d/syncthing-{{ services.syncthing.user }}"
+    - "/etc/nginx/conf.d/syncthing.conf"
diff --git a/roles/syncthing/tasks/start.yml b/roles/syncthing/tasks/start.yml
@@ -0,0 +1,45 @@
+---
+
+- name: "Enable linger for user: {{ services.syncthing.user }}"
+  command: "loginctl enable-linger {{ services.syncthing.user }}"
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.syncthing.user is defined
+
+- name: "Find uid of user: {{ services.syncthing.user }}"
+  command: "id -u {{ services.syncthing.user }}"
+  register: userId
+  check_mode: no # Run even in check mode, otherwise the playbook fails with --check.
+  changed_when: false
+  when:
+    - ansible_service_mgr == "systemd"
+    - services.syncthing.user is defined
+
+- name: "Determine XDG_RUNTIME_DIR"
+  set_fact:
+    xdg_runtime_dir: "/run/user/{{ userId.stdout }}"
+  changed_when: false
+  when:
+    - ansible_service_mgr == "systemd"
+
+
+- name: "[OpenRC] Enable and start service: syncthing-{{ services.syncthing.user }}"
+  service:
+    name: "syncthing-{{ services.syncthing.user }}"
+    enabled: yes
+    state: started
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Enable and start service: syncthing"
+  environment:
+    XDG_RUNTIME_DIR: "{{ xdg_runtime_dir }}"
+  systemd:
+    name: syncthing
+    scope: user
+    enabled: yes
+    state: restarted
+  become: true
+  become_user: "{{ services.syncthing.user }}"
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/syncthing/templates/nginx-vhost.conf.j2 b/roles/syncthing/templates/nginx-vhost.conf.j2
@@ -0,0 +1,35 @@
+#
+# !!! This file is managed by Ansible !!!
+#
+
+{% if services.syncthing.nginx.sslOnly is false %}
+server {
+	listen 80 ;
+	listen [::]:80;
+	
+	server_name {{ services.syncthing.nginx.domain }};
+
+	location / {
+		proxy_pass       http://127.0.0.1:8384/;
+		proxy_set_header Host localhost;
+	}
+}
+
+{% endif %}
+{% if services.syncthing.nginx.ssl.enable is true %}
+server {
+	listen 443 ssl;
+	listen [::]:443 ssl;
+
+	ssl_certificate "{{ services.syncthing.nginx.ssl.cert }}";
+	ssl_certificate_key "{{ services.syncthing.nginx.ssl.privkey }}";
+	include /etc/nginx/ssl.conf;
+	
+	server_name {{ services.syncthing.nginx.domain }};
+
+	location / {
+		proxy_pass       http://127.0.0.1:8384/;
+		proxy_set_header Host localhost;
+	}
+}
+{% endif %}
diff --git a/roles/syncthing/templates/openrc-service.j2 b/roles/syncthing/templates/openrc-service.j2
@@ -0,0 +1,28 @@
+#!/sbin/openrc-run
+
+#
+# !!! This file is managed by Ansible !!!
+#
+
+name=$RC_SVCNAME
+command=/usr/bin/syncthing
+{% if services.syncthing.guiAddress is defined %}
+command_args="-no-browser -gui-address={{ services.syncthing.guiAddress }}"
+{% else %}
+command_args="-no-browser"
+{% endif %}
+command_user="{{ services.syncthing.user }}:{{ services.syncthing.user }}"
+pidfile=/run/${RC_SVCNAME}.pid
+command_background=yes
+start_stop_daemon_args="--stdout /var/log/$RC_SVCNAME/${RC_SVCNAME}.log --stderr /var/log/$RC_SVCNAME/${RC_SVCNAME}.log"
+
+depend() {
+        use logger dns
+        need net
+        after firewall
+}
+
+start_pre() {
+        checkpath --directory --owner $command_user --mode 0775 \
+                /var/log/$RC_SVCNAME
+}
diff --git a/roles/websites/tasks/ctu.cx.yml b/roles/websites/tasks/ctu.cx.yml
@@ -0,0 +1,30 @@
+---
+
+- name: "Create directory: /var/lib/websites/ctu.cx"
+  file:
+    path: /var/lib/websites/ctu.cx
+    state: directory
+    owner: leah
+    group: nginx
+
+- name: copy vhost for ctu.cx into place
+  copy:
+    src: config-files/website-vhosts/ctu.cx.conf
+    dest: /etc/nginx/conf.d/ctu.cx.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/websites/tasks/main.yml b/roles/websites/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- include: ctu.cx.yml
+  tags:
+    - install_ctu.cx
+  when: system.hostname == "wanderduene"
+
+- include: repo.f2k1.de.yml
+  tags:
+    - install_repo.f2k1.de
+  when: system.hostname == "wanderduene"
+
+- include: photos.ctu.cx.yml
+  tags:
+    - install_photos.ctu.cx
+  when: system.hostname == "taurus"
diff --git a/roles/websites/tasks/photos.ctu.cx.yml b/roles/websites/tasks/photos.ctu.cx.yml
@@ -0,0 +1,30 @@
+---
+
+- name: "create directory: /var/lib/websites/photos.ctu.cx"
+  file:
+    path: /var/lib/websites/photos.ctu.cx
+    state: directory
+    owner: leah
+    group: nginx
+
+- name: copy vhost for photos.ctu.cx into place
+  copy:
+    src: config-files/website-vhosts/photos.ctu.cx.conf
+    dest: /etc/nginx/conf.d/photos.ctu.cx.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/roles/websites/tasks/repo.f2k1.de.yml b/roles/websites/tasks/repo.f2k1.de.yml
@@ -0,0 +1,23 @@
+---
+
+- name: copy vhost for repo.f2k1.de into place
+  copy:
+    src: config-files/website-vhosts/repo.f2k1.de.conf
+    dest: /etc/nginx/conf.d/repo.f2k1.de.conf
+    mode: 0644
+    owner: nginx
+    group: nginx
+
+- name: "[OpenRC] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "openrc"
+
+- name: "[systemd] Restart service: nginx"
+  service:
+    name: nginx
+    state: restarted
+  when: 
+    - ansible_service_mgr == "systemd"
diff --git a/alpine/roles/wireguard/tasks/main.yml b/roles/wireguard/tasks/main.yml
diff --git a/scripts/restic-backup-wanderduene.sh b/scripts/restic-backup-wanderduene.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env sh
+
+#backup services
+for service in pleroma radicale synapse git maddy oeffisearch
+do
+  sudo -u $service restic init --password-file /var/lib/$service/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-$service
+  sudo -u $service restic backup --password-file /var/lib/$service/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-$service /var/lib/$service
+done
+
+#backup websites
+sudo -u leah restic init --password-file /var/lib/websites/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-websites
+sudo -u leah restic backup --password-file /var/lib/websites/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-websites /var/lib/websites
+
+#backup postgres
+SQLFILE=/var/lib/postgresql/backup/postgres_$(date "+%Y-%m-%d_%H:%M").sql
+sudo -u postgres mkdir /var/lib/postgresql/backup
+sudo -u postgres bash -c "pg_dumpall > $SQLFILE"
+sudo -u postgres restic init --password-file /var/lib/postgresql/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-postgres
+sudo -u postgres restic backup --password-file /var/lib/postgresql/restic-password --repo rest:https://restic:$(cat /var/lib/restic-password)@restic.ctu.cx/$(hostname)-postgres /var/lib/postgresql/backup
+sudo -u postgres rm -rf /var/lib/postgresql/backup