many new containers. work in progress
This commit is contained in:
parent
f0f9f608cc
commit
aed48ffdf8
31 changed files with 1042 additions and 1 deletions
7
mailserver/.env
Normal file
@@ -0,0 +1,7 @@
HOSTNAME=mail.yourdomain.com

# Relay outbound mail through Amazon SES
RELAY_HOST=email-smtp.us-west-2.amazonaws.com
RELAY_PORT=2587
RELAY_USER=
RELAY_PASSWORD=
7
mailserver/Caddyfile
Normal file
@@ -0,0 +1,7 @@
{
    email admin@yourdomain.com
}

mail.yourdomain.com {
    reverse_proxy * http://roundcube:80
}
36
mailserver/README.md
Normal file
@@ -0,0 +1,36 @@
# Mailserver Setup

This Docker Compose stack fires up a copy of docker-mailserver.

* The services (SMTP, IMAP, POP3, etc.) are exposed by tunneling traffic from a public-facing VPS
* Outbound mail is sent through Amazon SES
* Optionally, inbound mail can be received through Amazon SES (via an S3 bucket), allowing this host to act as a backup or primary MX if you need it
  * Make sure to update the bucket information in `s3-ingest.py`

Steps:

1. Update the parameters in `.env`, `wireguard.conf`, and `Caddyfile`.
2. Initially, comment out (in `docker-compose.yml`) the two lines starting with `- ./data/caddy/certificates`. Start the stack once without them so that Caddy can fetch the certificates; once that happens, uncomment those lines and restart (see the sketch after this list).
3. Set up Mailgun or SES for mail relaying and enter the relay config in `.env`. SES is easy to work with and supports multiple sending domains with a single set of credentials.
4. Optionally, set up an S3 bucket, configure SES to deliver inbound mail there, update `s3-ingest.py`, and uncomment the mail-ingestion lines in `docker-compose.yml`. This is handy if your VPS/ISP blocks inbound mail ports.
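
A minimal sketch of the first start described in step 2, assuming the service names from `docker-compose.yml` and the Docker Compose v2 CLI:

```
# Start the stack with the certificate mounts still commented out
docker compose up -d

# Watch Caddy obtain certificates for the hostname configured in .env / Caddyfile
docker compose logs -f caddy

# Once the certs exist under ./data/caddy/certificates, uncomment the two
# mount lines in docker-compose.yml and recreate the mailserver container
docker compose up -d --force-recreate mailserver
```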

## Front-end Server WireGuard

This WireGuard configuration is deployed to the public-facing VPS, which forwards the interesting traffic (ports 25, 465, 587, 993, 995, 80, 443) through to our Docker services.

```
[Interface]
Address = 10.0.0.1/24   # Private IP for the VPS in the VPN network
ListenPort = 51820      # Default WireGuard port
PrivateKey = ##PRIVATE KEY FOR PUBLIC SERVER##

# packet forwarding
PreUp = sysctl -w net.ipv4.ip_forward=1

# port forwarding for the mail and web ports listed above
PreUp = iptables -t nat -A PREROUTING -i eth0 -p tcp -m multiport --dports 25,465,587,993,995,80,443 -j DNAT --to-destination 10.0.0.2
PostDown = iptables -t nat -D PREROUTING -i eth0 -p tcp -m multiport --dports 25,465,587,993,995,80,443 -j DNAT --to-destination 10.0.0.2

[Peer]
PublicKey = ##PUBLIC KEY FOR PRIVATE SERVER##
AllowedIPs = 10.0.0.2/32   # IP of the home server in the VPN
```
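
To fill in the key placeholders (here and in `wireguard.conf`), a keypair can be generated on each host with the standard wireguard-tools commands; each host's public key then goes into the other host's `[Peer]` section. A minimal sketch:

```
wg genkey | tee privatekey | wg pubkey > publickey
```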
8
mailserver/cron/fts_xapian
Normal file
@@ -0,0 +1,8 @@
# Setting `MAILTO=""` prevents cron from emailing a notification of the task outcome on each run
MAILTO=""
#
# m h dom mon dow user command
#
# Every day at 4:00 AM, optimize index files
0 4 * * * root doveadm fts optimize -A
# EOF
6
mailserver/cron/s3
Normal file
@@ -0,0 +1,6 @@
# Setting `MAILTO=""` prevents cron from emailing a notification of the task outcome on each run
MAILTO=""
#
# m h dom mon dow user command
* * * * * root /usr/local/bin/s3-ingest >> /var/log/mail/s3-ingest.log 2>&1
# EOF
86
mailserver/docker-compose.yml
Normal file
@@ -0,0 +1,86 @@
services:

  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    hostname: ${HOSTNAME}
    cap_add:
      - NET_ADMIN
    environment:
      - TZ=America/Edmonton
    volumes:
      - ./wireguard.conf:/config/wg_confs/wg0.conf
    restart: always
    sysctls:
      - net.ipv4.ip_forward=1


  mailserver:
    image: ghcr.io/docker-mailserver/docker-mailserver:latest
    network_mode: service:wireguard
    volumes:
      - ./data/dms/mail-data/:/var/mail/
      - ./data/dms/mail-state/:/var/mail-state/
      - ./data/dms/mail-logs/:/var/log/mail/
      - ./data/dms/config/:/tmp/docker-mailserver/
      - /etc/localtime:/etc/localtime:ro

      # Enable ingestion from S3
      #- ./s3-ingest.py:/usr/local/bin/s3-ingest:ro
      #- ./cron/s3:/etc/cron.d/s3:ro

      # Enable full text searching
      # https://docker-mailserver.github.io/docker-mailserver/latest/config/advanced/full-text-search/
      - ./fts-xapian-plugin.conf:/etc/dovecot/conf.d/10-plugin.conf:ro
      - ./cron/fts_xapian:/etc/cron.d/fts_xapian:ro

      # When initializing, these need to be commented out: the certificate files
      # don't exist until Caddy has had a chance to fetch them.
      - ./data/caddy/certificates/acme.zerossl.com-v2-dv90/${HOSTNAME}/${HOSTNAME}.crt:/etc/letsencrypt/live/${HOSTNAME}/fullchain.pem:ro
      - ./data/caddy/certificates/acme.zerossl.com-v2-dv90/${HOSTNAME}/${HOSTNAME}.key:/etc/letsencrypt/live/${HOSTNAME}/privkey.pem:ro
    environment:
      - ENABLE_RSPAMD=1
      - ENABLE_OPENDMARC=0
      - ENABLE_POLICYD_SPF=0
      - ENABLE_FAIL2BAN=1
      - ENABLE_POSTGREY=1
      - ENABLE_DNSBL=1
      - ENABLE_CLAMAV=1
      - ENABLE_POP3=1

      # We'll leverage certs from Caddy here
      - SSL_TYPE=letsencrypt

      # Assume we can't send outbound mail directly. Relay outgoing mail through
      # something like Mailgun or Amazon SES.
      - RELAY_HOST=${RELAY_HOST}
      - RELAY_PORT=${RELAY_PORT}
      - RELAY_USER=${RELAY_USER}
      - RELAY_PASSWORD=${RELAY_PASSWORD}
    cap_add:
      - NET_ADMIN # For Fail2Ban to work
    restart: always

  # ========= WEBMAIL =========================================
  # Who doesn't want webmail? Besides, we can piggyback on this
  # to fetch TLS certificates for our IMAP/SMTP services.

  caddy:
    image: caddy:latest
    restart: always
    network_mode: service:wireguard
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile # Mount Caddyfile for configuration
      - ./data/caddy:/data/caddy         # Persistent storage for certificates

  roundcube:
    image: roundcube/roundcubemail:latest
    container_name: roundcubemail
    restart: always
    volumes:
      - ./data/roundcube/www:/var/www/html
      - ./data/roundcube/db:/var/roundcube/db
    environment:
      - ROUNDCUBEMAIL_DB_TYPE=sqlite
      - ROUNDCUBEMAIL_SKIN=elastic
      - ROUNDCUBEMAIL_DEFAULT_HOST=tls://${HOSTNAME}
      - ROUNDCUBEMAIL_SMTP_SERVER=tls://${HOSTNAME}
28
mailserver/fts-xapian-plugin.conf
Normal file
@@ -0,0 +1,28 @@
mail_plugins = $mail_plugins fts fts_xapian

plugin {
  fts = xapian
  fts_xapian = partial=3 full=20 verbose=0

  fts_autoindex = yes
  fts_enforced = yes

  # Exclude folders from indexing
  fts_autoindex_exclude = \Trash

  # Index attachments
  # fts_decoder = decode2text
}

service indexer-worker {
  # Limit the indexer-worker's RAM usage, e.g. 512MB, 1GB, 2GB
  vsz_limit = 1GB
}

# service decode2text {
#   executable = script /usr/libexec/dovecot/decode2text.sh
#   user = dovecot
#   unix_listener decode2text {
#     mode = 0666
#   }
# }
149
mailserver/s3-ingest.py
Executable file
@@ -0,0 +1,149 @@
#!/usr/bin/env python3

import os
import datetime
import hashlib
import hmac
import http.client
import urllib.parse
import logging
import subprocess
import xml.etree.ElementTree as ET

# AWS S3 configuration
# These would be better as environment variables, but cron jobs don't inherit them.
bucket_name = "MYMAILBUCKET"
prefix = ""
region = 'us-west-2'
access_key = ""
secret_key = ""

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')


def sign(key, msg):
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()


def get_signature_key(key, date_stamp, region_name, service_name):
    k_date = sign(('AWS4' + key).encode('utf-8'), date_stamp)
    k_region = sign(k_date, region_name)
    k_service = sign(k_region, service_name)
    k_signing = sign(k_service, 'aws4_request')
    return k_signing


def create_signed_headers(method, host, uri, params, body=''):
    t = datetime.datetime.utcnow()
    amz_date = t.strftime('%Y%m%dT%H%M%SZ')
    date_stamp = t.strftime('%Y%m%d')

    canonical_uri = uri
    canonical_querystring = '&'.join([f"{urllib.parse.quote_plus(k)}={urllib.parse.quote_plus(v)}" for k, v in params.items()])
    payload_hash = hashlib.sha256(body.encode('utf-8')).hexdigest() if body else hashlib.sha256(b'').hexdigest()

    # Include x-amz-date and x-amz-content-sha256 in canonical headers and signed headers
    canonical_headers = f'host:{host}\n' \
                        f'x-amz-content-sha256:{payload_hash}\n' \
                        f'x-amz-date:{amz_date}\n'
    signed_headers = 'host;x-amz-content-sha256;x-amz-date'

    canonical_request = f"{method}\n{canonical_uri}\n{canonical_querystring}\n{canonical_headers}\n{signed_headers}\n{payload_hash}"
    algorithm = 'AWS4-HMAC-SHA256'
    credential_scope = f'{date_stamp}/{region}/s3/aws4_request'
    string_to_sign = f'{algorithm}\n{amz_date}\n{credential_scope}\n{hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()}'

    signing_key = get_signature_key(secret_key, date_stamp, region, 's3')
    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()

    authorization_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )

    headers = {
        'x-amz-date': amz_date,
        'x-amz-content-sha256': payload_hash,
        'Authorization': authorization_header
    }
    return headers


def make_request(method, uri, params=None, headers=None):
    host = f's3.{region}.amazonaws.com'
    conn = http.client.HTTPSConnection(host)

    if params:
        query_string = urllib.parse.urlencode(params)
        full_uri = f"{uri}?{query_string}"
    else:
        full_uri = uri

    conn.request(method, full_uri, headers=headers)
    response = conn.getresponse()
    data = response.read()
    conn.close()

    return response.status, data


def list_objects():
    uri = f'/{bucket_name}'
    params = {'list-type': '2', 'prefix': prefix}
    headers = create_signed_headers('GET', f's3.{region}.amazonaws.com', uri, params)

    status, response = make_request('GET', uri, params, headers)
    if status == 200:
        return response
    else:
        logging.error(f"Error listing objects: {response}")
        return None


def download_object(key):
    uri = f'/{bucket_name}/{urllib.parse.quote_plus(key)}'
    headers = create_signed_headers('GET', f's3.{region}.amazonaws.com', uri, {})

    status, response = make_request('GET', uri, headers=headers)
    if status == 200:
        return response
    else:
        logging.error(f"Error downloading {key}: {response}")
        return None


def delete_object(key):
    uri = f'/{bucket_name}/{urllib.parse.quote_plus(key)}'
    headers = create_signed_headers('DELETE', f's3.{region}.amazonaws.com', uri, {})

    status, response = make_request('DELETE', uri, headers=headers)
    if status == 204:
        logging.info(f"Deleted {key} from S3")
    else:
        logging.error(f"Error deleting {key}: {response}")


def inject_email(email_content):
    # Hand the raw message to Postfix via sendmail; the message headers determine the recipients.
    process = subprocess.Popen(['/usr/sbin/sendmail', '-t'], stdin=subprocess.PIPE)
    process.communicate(input=email_content)
    if process.returncode == 0:
        logging.info("Email successfully injected into Postfix")
    else:
        logging.error("Failed to inject email into Postfix")


def main():
    # List all objects with the specified prefix
    xml_content = list_objects()
    if xml_content:
        root = ET.fromstring(xml_content)
        namespace = {'ns': root.tag.split('}')[0].strip('{')}  # Extracts namespace from the root tag

        for contents in root.findall('.//ns:Contents', namespace):
            key = contents.find('ns:Key', namespace).text
            logging.info(f"Processing {key}")
            email_content = download_object(key)
            if email_content:
                inject_email(email_content)
                delete_object(key)


def extract_keys_from_xml(xml_content):
    # Currently unused helper: parse a listing response and return all object keys.
    root = ET.fromstring(xml_content)
    return [elem.text for elem in root.iter() if elem.tag.endswith('Key')]


if __name__ == '__main__':
    main()
13
mailserver/wireguard.conf
Normal file
@@ -0,0 +1,13 @@
[Interface]
Address = 10.0.0.2/24   # Private IP for the home server in the VPN network
PrivateKey = ##PRIVATE KEY FOR PRIVATE SERVER##
Table = 123

PreUp = ip rule add from 10.0.0.2 table 123 priority 1
PostDown = ip rule del from 10.0.0.2 table 123 priority 1

[Peer]
PublicKey = ##PUBLIC_KEY_FOR_PUBLIC_SERVER##
AllowedIPs = 0.0.0.0/0
Endpoint = ##PUBLIC_SERVER_IP##:51820
PersistentKeepalive = 25