615448b875
When ip_range changes in Settings, the new subnet is now applied to: - DNS zone records (network_manager.apply_ip_range) - Caddy virtual IPs (firewall_manager.ensure_caddy_virtual_ips) - iptables per-service rules (firewall_manager.update_service_ips) - docker-compose.yml static IPs if writable (ip_utils.update_docker_compose_ips) New module ip_utils.py derives all container IPs from the subnet using fixed offsets so the entire stack stays consistent from one setting. 321 tests pass (72 new tests added for ip_utils, apply_ip_range, update_service_ips). Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
316 lines
12 KiB
Python
316 lines
12 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Firewall Manager for Personal Internet Cell
|
|
Manages per-peer iptables rules in the WireGuard container and DNS ACLs in CoreDNS.
|
|
"""
|
|
|
|
import os
|
|
import subprocess
|
|
import logging
|
|
import re
|
|
from typing import Dict, List, Any, Optional
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Virtual IPs assigned to Caddy per service — must match Caddyfile listeners.
|
|
# Populated at import time from the default subnet; call update_service_ips()
|
|
# whenever ip_range changes so all downstream callers see the new values.
|
|
SERVICE_IPS: Dict[str, str] = dict(
    calendar='172.20.0.21',
    files='172.20.0.22',
    mail='172.20.0.23',
    webdav='172.20.0.24',
)
|
|
|
|
|
|
def update_service_ips(ip_range: str) -> None:
    """Recalculate SERVICE_IPS from the new subnet and update in-place.

    Mutates the module-level dict (clear + update) instead of rebinding it,
    so every caller holding a reference to SERVICE_IPS sees the new values.
    """
    from ip_utils import get_virtual_ips

    refreshed = get_virtual_ips(ip_range)
    SERVICE_IPS.clear()
    SERVICE_IPS.update(refreshed)
|
|
|
|
# Internal RFC-1918 ranges (peer traffic stays inside these = cell-only access)
PRIVATE_NETS: List[str] = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']

# Container names used with `docker exec` by the helpers below.
WIREGUARD_CONTAINER: str = 'cell-wireguard'
CADDY_CONTAINER: str = 'cell-caddy'
# Corefile path as seen from THIS process (API side); mapped into CoreDNS.
COREFILE_PATH: str = '/app/config/dns/Corefile'
ZONE_DATA_DIR: str = '/data'  # inside CoreDNS container; mounted from ./data/dns
|
|
|
|
|
|
def _run(cmd: List[str], check: bool = True,
         timeout: float = 10) -> subprocess.CompletedProcess:
    """Run *cmd* (an argv list, no shell) and return the completed process.

    Args:
        cmd: Command and arguments, executed without a shell.
        check: When True, log a warning for a non-zero exit code.  Unlike
            subprocess's own ``check``, this never raises on bad exit
            status — only launch failures/timeouts propagate.
        timeout: Seconds to wait before ``subprocess.TimeoutExpired`` is
            raised (previously hard-coded to 10; default unchanged).

    Raises:
        Exception: re-raised after logging when the command cannot be
            started or times out.
    """
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
        if check and result.returncode != 0:
            logger.warning(f"Command {cmd} exited {result.returncode}: {result.stderr.strip()}")
        return result
    except Exception as e:
        logger.error(f"Command {cmd} failed: {e}")
        raise
|
|
|
|
|
|
def _wg_exec(args: List[str]) -> subprocess.CompletedProcess:
    """Execute *args* inside the WireGuard container via docker exec."""
    cmd = ['docker', 'exec', WIREGUARD_CONTAINER, *args]
    return _run(cmd, check=False)
|
|
|
|
|
|
def _caddy_exec(args: List[str]) -> subprocess.CompletedProcess:
    """Execute *args* inside the Caddy container via docker exec."""
    cmd = ['docker', 'exec', CADDY_CONTAINER, *args]
    return _run(cmd, check=False)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Virtual IP management (Caddy container)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def ensure_caddy_virtual_ips() -> bool:
    """Attach each service's virtual IP to Caddy's eth0 unless already present.

    Returns True when the interface could be inspected (individual add
    failures are only logged); False when inspecting eth0 itself failed.
    """
    try:
        current = _caddy_exec(['ip', 'addr', 'show', 'eth0']).stdout

        for svc, addr in SERVICE_IPS.items():
            if addr in current:
                continue  # already configured
            outcome = _caddy_exec(['ip', 'addr', 'add', f'{addr}/16', 'dev', 'eth0'])
            if outcome.returncode != 0:
                logger.warning(f"Failed to add virtual IP {addr}: {outcome.stderr.strip()}")
            else:
                logger.info(f"Added virtual IP {addr} for {svc} to Caddy eth0")
        return True
    except Exception as e:
        logger.error(f"ensure_caddy_virtual_ips failed: {e}")
        return False
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# iptables rule helpers
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _iptables(args: List[str], check: bool = False) -> subprocess.CompletedProcess:
    """Run iptables inside the WireGuard container.

    Fix: ``check`` was previously accepted but silently discarded (the call
    went through _wg_exec, which hard-codes check=False).  It is now
    forwarded, so callers can opt in to warning logs on non-zero exits.
    The default (False) preserves the behavior of every existing call site.
    """
    return _run(['docker', 'exec', WIREGUARD_CONTAINER, 'iptables'] + args, check=check)
|
|
|
|
|
|
def _rule_exists(chain: str, rule_args: List[str]) -> bool:
    """Return True when `iptables -C` reports the rule present in *chain*."""
    return _iptables(['-C', chain, *rule_args]).returncode == 0
|
|
|
|
|
|
def _ensure_rule(chain: str, rule_args: List[str]) -> None:
    """Insert the rule at the top of *chain* unless an identical one exists."""
    if _rule_exists(chain, rule_args):
        return
    _iptables(['-I', chain, *rule_args])
|
|
|
|
|
|
def _delete_rule(chain: str, rule_args: List[str]) -> None:
    """Remove every copy of the rule from *chain*; no-op when absent."""
    while _rule_exists(chain, rule_args):
        _iptables(['-D', chain, *rule_args])
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Per-peer rule management
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _peer_comment(peer_ip: str) -> str:
|
|
return f'pic-peer-{peer_ip.replace(".", "-")}'
|
|
|
|
|
|
def clear_peer_rules(peer_ip: str) -> None:
    """Remove every FORWARD rule tagged with this peer's comment.

    Goes through iptables-save / iptables-restore so the removal is atomic
    and the relative order of all remaining rules is preserved.
    """
    tag = _peer_comment(peer_ip)
    try:
        dump = _wg_exec(['iptables-save'])
        if dump.returncode != 0:
            return  # can't read the current ruleset; leave it untouched

        original = dump.stdout.splitlines()
        kept = [line for line in original if tag not in line]
        if len(kept) == len(original):
            return  # nothing to remove

        payload = '\n'.join(kept) + '\n'
        restore = subprocess.run(
            ['docker', 'exec', '-i', WIREGUARD_CONTAINER, 'iptables-restore'],
            input=payload, capture_output=True, text=True, timeout=10,
        )
        if restore.returncode != 0:
            logger.warning(f"iptables-restore failed: {restore.stderr.strip()}")
    except Exception as e:
        logger.error(f"clear_peer_rules({peer_ip}): {e}")
|
|
|
|
|
|
def apply_peer_rules(peer_ip: str, settings: Dict[str, Any]) -> bool:
    """
    Apply iptables FORWARD rules for a peer based on their access settings.

    Each rule is inserted at position 1 (-I), so the LAST call ends up at the TOP.
    We insert in reverse-priority order: lowest-priority rules first, highest last.

    Desired final chain order (top = highest priority):
      1. Per-service DROP/ACCEPT (most specific — must beat private-net ACCEPT)
      2. Peer-to-peer ACCEPT/DROP (10.0.0.0/24)
      3. Private-net ACCEPTs (for no-internet peers to reach local resources)
      4. Internet DROP or ACCEPT (lowest priority catch-all)

    Args:
        peer_ip: The peer's WireGuard address; also used to derive the
            comment tag that marks every rule as owned by this peer.
        settings: Access flags — 'internet_access' (bool, default True),
            'service_access' (list of allowed service names, default all),
            'peer_access' (bool, default True).

    Returns:
        True when all inserts were attempted, False on an unexpected error
        (individual iptables failures are not detected here: _iptables runs
        with check=False).
    """
    try:
        comment = _peer_comment(peer_ip)
        # Drop this peer's previous rules first so re-applying is idempotent.
        clear_peer_rules(peer_ip)

        internet_access = settings.get('internet_access', True)
        service_access = settings.get('service_access', list(SERVICE_IPS.keys()))
        peer_access = settings.get('peer_access', True)

        # --- Step 1 (inserted first → ends up at bottom before default ACCEPT) ---
        # Internet catch-all: allow or block
        if internet_access:
            _iptables(['-I', 'FORWARD', '-s', peer_ip,
                       '-m', 'comment', '--comment', comment, '-j', 'ACCEPT'])
        else:
            # Block non-private, allow private nets
            _iptables(['-I', 'FORWARD', '-s', peer_ip,
                       '-m', 'comment', '--comment', comment, '-j', 'DROP'])
            # reversed() so the final chain lists PRIVATE_NETS in declared order.
            for net in reversed(PRIVATE_NETS):
                _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', net,
                           '-m', 'comment', '--comment', comment, '-j', 'ACCEPT'])

        # --- Step 2 --- Peer-to-peer (10.0.0.0/24)
        target = 'ACCEPT' if peer_access else 'DROP'
        _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', '10.0.0.0/24',
                   '-m', 'comment', '--comment', comment, '-j', target])

        # --- Step 3 (inserted last → ends up at TOP of chain) ---
        # Per-service rules — inserted in reverse dict order so first service ends up at top
        for service, svc_ip in reversed(list(SERVICE_IPS.items())):
            target = 'ACCEPT' if service in service_access else 'DROP'
            _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', svc_ip,
                       '-m', 'comment', '--comment', comment, '-j', target])

        logger.info(f"Applied rules for {peer_ip}: internet={internet_access} "
                    f"services={service_access} peers={peer_access}")
        return True
    except Exception as e:
        logger.error(f"apply_peer_rules({peer_ip}): {e}")
        return False
|
|
|
|
|
|
def apply_all_peer_rules(peers: List[Dict[str, Any]]) -> None:
    """Re-apply firewall rules for every peer (called on startup)."""
    ensure_caddy_virtual_ips()
    for entry in peers:
        addr = entry.get('ip')
        if not addr:
            continue  # a peer without an IP has no rules to manage
        access = {
            'internet_access': entry.get('internet_access', True),
            'service_access': entry.get('service_access', list(SERVICE_IPS.keys())),
            'peer_access': entry.get('peer_access', True),
        }
        apply_peer_rules(addr, access)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# DNS ACL (CoreDNS Corefile generation)
|
|
# ---------------------------------------------------------------------------
|
|
|
|
# Map service name → DNS hostname in .cell zone
SERVICE_HOSTS = {name: f'{name}.cell.' for name in ('calendar', 'files', 'mail', 'webdav')}
|
|
|
|
|
|
def _build_acl_block(blocked_peers_by_service: Dict[str, List[str]]) -> str:
    """
    Build CoreDNS ACL plugin stanzas.

    blocked_peers_by_service: { 'calendar': ['10.0.0.2', '10.0.0.3'], ... }
    Returns a string to embed in the `cell { }` zone block.

    Fix: previously one complete stanza was emitted PER blocked IP, each
    ending in `allow net 0.0.0.0/0`.  ACL rules are evaluated in order with
    first-match-wins, so the allow-all in the first stanza made every later
    peer's block rule unreachable.  All blocked IPs for a service now share
    one stanza, with the catch-all allows emitted last.
    """
    if not blocked_peers_by_service:
        return ''

    lines: List[str] = []
    for service, peer_ips in blocked_peers_by_service.items():
        host = SERVICE_HOSTS.get(service)
        # Skip unknown services and services with no blocked peers.
        if not host or not peer_ips:
            continue
        lines.append(f'    acl {host} {{')
        for ip in peer_ips:
            lines.append(f'        block net {ip}/32')
        # Everyone not explicitly blocked above may resolve this host.
        lines.append('        allow net 0.0.0.0/0')
        lines.append('        allow net ::/0')
        lines.append('    }')
    return '\n'.join(lines)
|
|
|
|
|
|
def generate_corefile(peers: List[Dict[str, Any]], corefile_path: str = COREFILE_PATH) -> bool:
    """
    Rewrite the CoreDNS Corefile with per-peer ACL rules and reload plugin.

    The file is written to corefile_path (API-side path mapped into CoreDNS container).

    Args:
        peers: Peer dicts; each is expected to carry 'ip' and optionally
            'service_access' (allowed service names — defaults to all).
        corefile_path: Destination path for the rendered Corefile.

    Returns:
        True on success, False when rendering or writing failed (logged).
    """
    try:
        # Invert per-peer allow-lists into per-service block-lists: a peer
        # is listed under a service iff that service is NOT in its allow-list.
        blocked: Dict[str, List[str]] = {s: [] for s in SERVICE_IPS}
        for peer in peers:
            ip = peer.get('ip')
            if not ip:
                continue  # no IP → cannot appear in an ACL
            allowed_services = peer.get('service_access', list(SERVICE_IPS.keys()))
            for service in SERVICE_IPS:
                if service not in allowed_services:
                    blocked[service].append(ip)

        acl_block = _build_acl_block(blocked)

        # Assemble the `cell` zone stanza; ACLs are embedded when present.
        cell_zone_block = 'cell {\n  file /data/cell.zone\n  log\n'
        if acl_block:
            cell_zone_block += acl_block + '\n'
        cell_zone_block += '}\n'

        # NOTE(review): the template's internal indentation was reconstructed
        # from a whitespace-mangled paste — confirm against the deployed file.
        # Cosmetic only: CoreDNS ignores leading whitespace in server blocks.
        corefile = f""". {{
    forward . 8.8.8.8 1.1.1.1
    cache
    log
    health
}}

{cell_zone_block}
local.cell {{
    file /data/local.zone
    log
}}
"""
        os.makedirs(os.path.dirname(corefile_path), exist_ok=True)
        with open(corefile_path, 'w') as f:
            f.write(corefile)

        logger.info(f"Wrote Corefile to {corefile_path}")
        return True
    except Exception as e:
        logger.error(f"generate_corefile: {e}")
        return False
|
|
|
|
|
|
def reload_coredns() -> bool:
    """Ask the CoreDNS container to reload its config by sending SIGHUP."""
    try:
        outcome = _run(['docker', 'kill', '--signal=SIGHUP', 'cell-dns'], check=False)
        if outcome.returncode != 0:
            logger.warning(f"SIGHUP to cell-dns failed: {outcome.stderr.strip()}")
            return False
        logger.info("Sent SIGHUP to cell-dns")
        return True
    except Exception as e:
        logger.error(f"reload_coredns: {e}")
        return False
|
|
|
|
|
|
def apply_all_dns_rules(peers: List[Dict[str, Any]], corefile_path: str = COREFILE_PATH) -> bool:
    """Regenerate the Corefile and, if that succeeded, reload CoreDNS.

    Returns the generation result; a failed reload is not reflected here.
    """
    if not generate_corefile(peers, corefile_path):
        return False
    reload_coredns()
    return True
|