feat: per-peer access enforcement, live peer status, auto IP assignment
Server-side access control: - firewall_manager.py: per-peer iptables FORWARD rules in WireGuard container; virtual IPs on Caddy (172.20.0.21-24) for per-service DROP/ACCEPT targeting - CoreDNS Corefile regenerated with ACL blocks for blocked services per peer - POST /api/wireguard/apply-enforcement re-applies rules after WireGuard restart; wg0.conf PostUp calls it via curl so rules restore automatically on container start WireGuard fixes: - _syncconf uses `wg set peer` instead of `wg syncconf` to avoid resetting ListenPort - add_peer validates AllowedIPs must be /32 — rejects full/split tunnel CIDRs that would route internet or LAN traffic to that peer - _config_file() checks for linuxserver wg_confs/ subdirectory first UI: - Peers page fetches /api/wireguard/peers/statuses for live handshake data; status badge now shows real Online/Offline + seconds since last handshake - IP field removed from Add Peer form (auto-assigned from 10.0.0.0/24) Tests (246 pass): - test_firewall_manager.py: 22 tests for ACL generation, iptables rule correctness, comment tagging, clear_peer_rules filter logic - test_peer_wg_integration.py: 10 tests for /32 enforcement, IP auto-assignment, syncconf called on add/remove - test_wireguard_manager.py: updated to reflect correct IPs and /32 requirement Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
+57
-6
@@ -41,6 +41,7 @@ from container_manager import ContainerManager
|
||||
from config_manager import ConfigManager
|
||||
from service_bus import ServiceBus, EventType
|
||||
from log_manager import LogManager
|
||||
import firewall_manager
|
||||
|
||||
# Context variable for request info
|
||||
request_context = contextvars.ContextVar('request_context', default={})
|
||||
@@ -168,6 +169,21 @@ cell_manager = CellManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
|
||||
app.vault_manager = VaultManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
|
||||
container_manager = ContainerManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
|
||||
|
||||
# Apply firewall + DNS rules from stored peer settings (survives API restarts)
|
||||
def _apply_startup_enforcement():
    """Re-apply firewall and DNS enforcement from stored peer settings.

    Runs once in a background thread at startup so that per-peer rules
    survive API restarts. Any failure is logged and swallowed — enforcement
    can always be re-triggered via POST /api/wireguard/apply-enforcement.
    """
    try:
        all_peers = peer_registry.list_peers()
        firewall_manager.apply_all_peer_rules(all_peers)
        firewall_manager.apply_all_dns_rules(all_peers, COREFILE_PATH)
        logger.info(f"Applied enforcement rules for {len(all_peers)} peers on startup")
    except Exception as e:
        logger.warning(f"Startup enforcement failed (non-fatal): {e}")
|
||||
|
||||
COREFILE_PATH = '/app/config/dns/Corefile'
|
||||
|
||||
# Run in background so startup isn't blocked waiting on docker exec
|
||||
threading.Thread(target=_apply_startup_enforcement, daemon=True).start()
|
||||
|
||||
# Register services with service bus
|
||||
service_bus.register_service('network', network_manager)
|
||||
service_bus.register_service('wireguard', wireguard_manager)
|
||||
@@ -942,6 +958,17 @@ def refresh_external_ip():
|
||||
logger.error(f"Error refreshing external IP: {e}")
|
||||
return jsonify({"error": str(e)}), 500
|
||||
|
||||
@app.route('/api/wireguard/apply-enforcement', methods=['POST'])
def apply_wireguard_enforcement():
    """Re-apply per-peer iptables and DNS enforcement rules (call after WireGuard restart)."""
    try:
        current_peers = peer_registry.list_peers()
        # Firewall rules first, then the CoreDNS ACL regeneration.
        firewall_manager.apply_all_peer_rules(current_peers)
        firewall_manager.apply_all_dns_rules(current_peers, COREFILE_PATH)
        return jsonify({'ok': True, 'peers': len(current_peers)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
||||
|
||||
@app.route('/api/wireguard/check-port', methods=['POST'])
|
||||
def check_wireguard_port():
|
||||
try:
|
||||
@@ -961,6 +988,20 @@ def get_peers():
|
||||
logger.error(f"Error getting peers: {e}")
|
||||
return jsonify({"error": str(e)}), 500
|
||||
|
||||
def _next_peer_ip() -> str:
    """Auto-assign the next free 10.0.0.x address (starts at .2, skips .1 = server)."""
    import ipaddress
    taken = {p.get('ip', '').split('/')[0] for p in peer_registry.list_peers()}
    taken.add('10.0.0.1')  # server address is never handed out
    for candidate in ipaddress.ip_network('10.0.0.0/24').hosts():
        addr = str(candidate)
        if addr not in taken:
            return addr
    raise ValueError('No free IPs left in 10.0.0.0/24')
|
||||
|
||||
|
||||
@app.route('/api/peers', methods=['POST'])
|
||||
def add_peer():
|
||||
"""Add a peer."""
|
||||
@@ -968,17 +1009,19 @@ def add_peer():
|
||||
data = request.get_json(silent=True)
|
||||
if data is None:
|
||||
return jsonify({"error": "No data provided"}), 400
|
||||
|
||||
# Validate required fields
|
||||
required_fields = ['name', 'ip', 'public_key']
|
||||
|
||||
# Validate required fields (ip is optional — auto-assigned if omitted)
|
||||
required_fields = ['name', 'public_key']
|
||||
for field in required_fields:
|
||||
if field not in data:
|
||||
return jsonify({"error": f"Missing required field: {field}"}), 400
|
||||
|
||||
|
||||
assigned_ip = data.get('ip') or _next_peer_ip()
|
||||
|
||||
# Add peer to registry with all provided fields
|
||||
peer_info = {
|
||||
'peer': data['name'],
|
||||
'ip': data['ip'],
|
||||
'ip': assigned_ip,
|
||||
'public_key': data['public_key'],
|
||||
'private_key': data.get('private_key'),
|
||||
'server_public_key': data.get('server_public_key'),
|
||||
@@ -994,7 +1037,10 @@ def add_peer():
|
||||
|
||||
success = peer_registry.add_peer(peer_info)
|
||||
if success:
|
||||
return jsonify({"message": f"Peer {data['name']} added successfully"}), 201
|
||||
# Apply server-side enforcement immediately
|
||||
firewall_manager.apply_peer_rules(peer_info['ip'], peer_info)
|
||||
firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH)
|
||||
return jsonify({"message": f"Peer {data['name']} added successfully", "ip": assigned_ip}), 201
|
||||
else:
|
||||
return jsonify({"error": f"Peer {data['name']} already exists"}), 400
|
||||
|
||||
@@ -1025,6 +1071,11 @@ def update_peer(peer_name):
|
||||
|
||||
success = peer_registry.update_peer(peer_name, updates)
|
||||
if success:
|
||||
# Re-apply server-side enforcement with updated settings
|
||||
updated_peer = peer_registry.get_peer(peer_name)
|
||||
if updated_peer:
|
||||
firewall_manager.apply_peer_rules(updated_peer['ip'], updated_peer)
|
||||
firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH)
|
||||
result = {"message": f"Peer {peer_name} updated", "config_changed": config_changed}
|
||||
return jsonify(result)
|
||||
else:
|
||||
|
||||
@@ -0,0 +1,305 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Firewall Manager for Personal Internet Cell
|
||||
Manages per-peer iptables rules in the WireGuard container and DNS ACLs in CoreDNS.
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Virtual IPs assigned to Caddy per service — must match Caddyfile listeners
|
||||
SERVICE_IPS = {
|
||||
'calendar': '172.20.0.21',
|
||||
'files': '172.20.0.22',
|
||||
'mail': '172.20.0.23',
|
||||
'webdav': '172.20.0.24',
|
||||
}
|
||||
|
||||
# Internal RFC-1918 ranges (peer traffic stays inside these = cell-only access)
|
||||
PRIVATE_NETS = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']
|
||||
|
||||
WIREGUARD_CONTAINER = 'cell-wireguard'
|
||||
CADDY_CONTAINER = 'cell-caddy'
|
||||
COREFILE_PATH = '/app/config/dns/Corefile'
|
||||
ZONE_DATA_DIR = '/data' # inside CoreDNS container; mounted from ./data/dns
|
||||
|
||||
|
||||
def _run(cmd: List[str], check: bool = True) -> subprocess.CompletedProcess:
|
||||
"""Run a shell command and return the result."""
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
|
||||
if check and result.returncode != 0:
|
||||
logger.warning(f"Command {cmd} exited {result.returncode}: {result.stderr.strip()}")
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"Command {cmd} failed: {e}")
|
||||
raise
|
||||
|
||||
|
||||
def _wg_exec(args: List[str]) -> subprocess.CompletedProcess:
    """Run a command inside the WireGuard container via docker exec."""
    cmd = ['docker', 'exec', WIREGUARD_CONTAINER]
    cmd.extend(args)
    return _run(cmd, check=False)
|
||||
|
||||
|
||||
def _caddy_exec(args: List[str]) -> subprocess.CompletedProcess:
    """Run a command inside the Caddy container via docker exec."""
    cmd = ['docker', 'exec', CADDY_CONTAINER]
    cmd.extend(args)
    return _run(cmd, check=False)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Virtual IP management (Caddy container)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def ensure_caddy_virtual_ips() -> bool:
    """Add per-service virtual IPs to Caddy's eth0 if not already present.

    Idempotent: the current address list is fetched once and only missing
    IPs are added. Returns False only if the inspection itself blows up;
    individual add failures are logged and skipped.
    """
    try:
        current = _caddy_exec(['ip', 'addr', 'show', 'eth0']).stdout
        for service, ip in SERVICE_IPS.items():
            if ip in current:
                continue  # already configured
            added = _caddy_exec(['ip', 'addr', 'add', f'{ip}/16', 'dev', 'eth0'])
            if added.returncode == 0:
                logger.info(f"Added virtual IP {ip} for {service} to Caddy eth0")
            else:
                logger.warning(f"Failed to add virtual IP {ip}: {added.stderr.strip()}")
        return True
    except Exception as e:
        logger.error(f"ensure_caddy_virtual_ips failed: {e}")
        return False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# iptables rule helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _iptables(args: List[str], check: bool = False) -> subprocess.CompletedProcess:
    """Run iptables inside the WireGuard container.

    Fix: the original delegated to _wg_exec, which hard-codes check=False,
    so the `check` parameter was silently discarded. Route through _run
    directly so callers that pass check=True actually get the failure
    warning logged. All existing call sites use the default (False), so
    behavior for them is unchanged.
    """
    return _run(['docker', 'exec', WIREGUARD_CONTAINER, 'iptables'] + args, check=check)
|
||||
|
||||
|
||||
def _rule_exists(chain: str, rule_args: List[str]) -> bool:
    """Return True if the exact rule is already present in `chain` (iptables -C)."""
    return _iptables(['-C', chain] + rule_args).returncode == 0
|
||||
|
||||
|
||||
def _ensure_rule(chain: str, rule_args: List[str]) -> None:
    """Insert rule at top of chain if it doesn't already exist."""
    if _rule_exists(chain, rule_args):
        return  # idempotent: never duplicate a rule
    _iptables(['-I', chain] + rule_args)
|
||||
|
||||
|
||||
def _delete_rule(chain: str, rule_args: List[str]) -> None:
    """Delete rule from chain (silently if it doesn't exist).

    Loops because the same rule may have been inserted more than once;
    `iptables -D` removes only a single instance per invocation.
    """
    while _rule_exists(chain, rule_args):
        _iptables(['-D', chain] + rule_args)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Per-peer rule management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _peer_comment(peer_ip: str) -> str:
|
||||
return f'pic-peer-{peer_ip.replace(".", "-")}'
|
||||
|
||||
|
||||
def clear_peer_rules(peer_ip: str) -> None:
    """Remove all FORWARD rules tagged with this peer's IP via iptables-save/restore."""
    tag = _peer_comment(peer_ip)
    try:
        dump = _wg_exec(['iptables-save'])
        if dump.returncode != 0:
            return
        original = dump.stdout.splitlines()
        kept = [line for line in original if tag not in line]
        if len(kept) == len(original):
            return  # nothing to remove
        # Dump rules, strip matching lines, restore — atomic and order-stable
        result = subprocess.run(
            ['docker', 'exec', '-i', WIREGUARD_CONTAINER, 'iptables-restore'],
            input='\n'.join(kept) + '\n', capture_output=True, text=True, timeout=10
        )
        if result.returncode != 0:
            logger.warning(f"iptables-restore failed: {result.stderr.strip()}")
    except Exception as e:
        logger.error(f"clear_peer_rules({peer_ip}): {e}")
|
||||
|
||||
|
||||
def apply_peer_rules(peer_ip: str, settings: Dict[str, Any]) -> bool:
    """
    Apply iptables FORWARD rules for a peer based on their access settings.

    Each rule is inserted at position 1 (-I), so the LAST call ends up at the TOP.
    We insert in reverse-priority order: lowest-priority rules first, highest last.

    Desired final chain order (top = highest priority):
      1. Per-service DROP/ACCEPT (most specific — must beat private-net ACCEPT)
      2. Peer-to-peer ACCEPT/DROP (10.0.0.0/24)
      3. Private-net ACCEPTs (for no-internet peers to reach local resources)
      4. Internet DROP or ACCEPT (lowest priority catch-all)

    Args:
        peer_ip: the peer's VPN address (10.0.0.x), used as -s source match.
        settings: dict with optional keys 'internet_access' (bool, default True),
            'service_access' (list of service names, default all), and
            'peer_access' (bool, default True).

    Returns:
        True on success, False on any error (logged, never raised).
    """
    try:
        comment = _peer_comment(peer_ip)
        # Wipe this peer's previous rules first so re-application is idempotent.
        clear_peer_rules(peer_ip)

        internet_access = settings.get('internet_access', True)
        service_access = settings.get('service_access', list(SERVICE_IPS.keys()))
        peer_access = settings.get('peer_access', True)

        # --- Step 1 (inserted first → ends up at bottom before default ACCEPT) ---
        # Internet catch-all: allow or block
        if internet_access:
            _iptables(['-I', 'FORWARD', '-s', peer_ip,
                       '-m', 'comment', '--comment', comment, '-j', 'ACCEPT'])
        else:
            # Block non-private, allow private nets
            _iptables(['-I', 'FORWARD', '-s', peer_ip,
                       '-m', 'comment', '--comment', comment, '-j', 'DROP'])
            # Inserted after the DROP, so these ACCEPTs land ABOVE it.
            for net in reversed(PRIVATE_NETS):
                _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', net,
                           '-m', 'comment', '--comment', comment, '-j', 'ACCEPT'])

        # --- Step 2 --- Peer-to-peer (10.0.0.0/24)
        target = 'ACCEPT' if peer_access else 'DROP'
        _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', '10.0.0.0/24',
                   '-m', 'comment', '--comment', comment, '-j', target])

        # --- Step 3 (inserted last → ends up at TOP of chain) ---
        # Per-service rules — inserted in reverse dict order so first service ends up at top
        for service, svc_ip in reversed(list(SERVICE_IPS.items())):
            target = 'ACCEPT' if service in service_access else 'DROP'
            _iptables(['-I', 'FORWARD', '-s', peer_ip, '-d', svc_ip,
                       '-m', 'comment', '--comment', comment, '-j', target])

        logger.info(f"Applied rules for {peer_ip}: internet={internet_access} "
                    f"services={service_access} peers={peer_access}")
        return True
    except Exception as e:
        logger.error(f"apply_peer_rules({peer_ip}): {e}")
        return False
|
||||
|
||||
|
||||
def apply_all_peer_rules(peers: List[Dict[str, Any]]) -> None:
    """Re-apply rules for all peers (called on startup)."""
    ensure_caddy_virtual_ips()
    for entry in peers:
        peer_ip = entry.get('ip')
        if not peer_ip:
            # Registry entry without an address — nothing to enforce.
            continue
        settings = {
            'internet_access': entry.get('internet_access', True),
            'service_access': entry.get('service_access', list(SERVICE_IPS.keys())),
            'peer_access': entry.get('peer_access', True),
        }
        apply_peer_rules(peer_ip, settings)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# DNS ACL (CoreDNS Corefile generation)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Map service name → DNS hostname in .cell zone
|
||||
SERVICE_HOSTS = {
|
||||
'calendar': 'calendar.cell.',
|
||||
'files': 'files.cell.',
|
||||
'mail': 'mail.cell.',
|
||||
'webdav': 'webdav.cell.',
|
||||
}
|
||||
|
||||
|
||||
def _build_acl_block(blocked_peers_by_service: Dict[str, List[str]]) -> str:
|
||||
"""
|
||||
Build CoreDNS ACL plugin stanzas.
|
||||
|
||||
blocked_peers_by_service: { 'calendar': ['10.0.0.2', '10.0.0.3'], ... }
|
||||
Returns a string to embed in the `cell { }` zone block.
|
||||
"""
|
||||
if not blocked_peers_by_service:
|
||||
return ''
|
||||
|
||||
lines = []
|
||||
for service, peer_ips in blocked_peers_by_service.items():
|
||||
host = SERVICE_HOSTS.get(service)
|
||||
if not host or not peer_ips:
|
||||
continue
|
||||
for ip in peer_ips:
|
||||
lines.append(f' acl {host} {{')
|
||||
lines.append(f' block net {ip}/32')
|
||||
lines.append(f' allow net 0.0.0.0/0')
|
||||
lines.append(f' allow net ::/0')
|
||||
lines.append(f' }}')
|
||||
return '\n'.join(lines)
|
||||
|
||||
|
||||
def generate_corefile(peers: List[Dict[str, Any]], corefile_path: str = COREFILE_PATH) -> bool:
    """
    Rewrite the CoreDNS Corefile with per-peer ACL rules and reload plugin.
    The file is written to corefile_path (API-side path mapped into CoreDNS container).

    Args:
        peers: registry peer dicts; each may carry 'ip' and 'service_access'.
        corefile_path: destination path for the rendered Corefile.

    Returns:
        True on success, False on any error (logged, never raised).
    """
    try:
        # Collect which peers block which services
        blocked: Dict[str, List[str]] = {s: [] for s in SERVICE_IPS}
        for peer in peers:
            ip = peer.get('ip')
            if not ip:
                continue
            # A missing 'service_access' key means the peer may reach every service.
            allowed_services = peer.get('service_access', list(SERVICE_IPS.keys()))
            for service in SERVICE_IPS:
                if service not in allowed_services:
                    blocked[service].append(ip)

        acl_block = _build_acl_block(blocked)

        # Zone block for .cell — ACL stanzas (if any) are embedded inside it.
        cell_zone_block = 'cell {\n file /data/cell.zone\n log\n'
        if acl_block:
            cell_zone_block += acl_block + '\n'
        cell_zone_block += '}\n'

        corefile = f""". {{
forward . 8.8.8.8 1.1.1.1
cache
log
health
}}

{cell_zone_block}
local.cell {{
file /data/local.zone
log
}}
"""
        os.makedirs(os.path.dirname(corefile_path), exist_ok=True)
        with open(corefile_path, 'w') as f:
            f.write(corefile)

        logger.info(f"Wrote Corefile to {corefile_path}")
        return True
    except Exception as e:
        logger.error(f"generate_corefile: {e}")
        return False
|
||||
|
||||
|
||||
def reload_coredns() -> bool:
    """Send SIGHUP to CoreDNS container to reload config."""
    try:
        outcome = _run(['docker', 'kill', '--signal=SIGHUP', 'cell-dns'], check=False)
        if outcome.returncode != 0:
            logger.warning(f"SIGHUP to cell-dns failed: {outcome.stderr.strip()}")
            return False
        logger.info("Sent SIGHUP to cell-dns")
        return True
    except Exception as e:
        logger.error(f"reload_coredns: {e}")
        return False
|
||||
|
||||
|
||||
def apply_all_dns_rules(peers: List[Dict[str, Any]], corefile_path: str = COREFILE_PATH) -> bool:
    """Regenerate Corefile and reload CoreDNS.

    Returns the result of the Corefile write; the reload's outcome is
    logged by reload_coredns but does not affect the return value.
    """
    if not generate_corefile(peers, corefile_path):
        return False
    reload_coredns()
    return True
|
||||
@@ -136,6 +136,10 @@ class WireGuardManager(BaseServiceManager):
|
||||
)
|
||||
|
||||
def _config_file(self) -> str:
|
||||
# linuxserver/wireguard stores configs in wg_confs/
|
||||
wg_confs = os.path.join(self.wireguard_dir, 'wg_confs')
|
||||
if os.path.isdir(wg_confs):
|
||||
return os.path.join(wg_confs, 'wg0.conf')
|
||||
return os.path.join(self.wireguard_dir, 'wg0.conf')
|
||||
|
||||
def _read_config(self) -> str:
|
||||
@@ -148,14 +152,95 @@ class WireGuardManager(BaseServiceManager):
|
||||
def _write_config(self, content: str):
|
||||
with open(self._config_file(), 'w') as f:
|
||||
f.write(content)
|
||||
self._syncconf()
|
||||
|
||||
def _syncconf(self):
    """Sync live WireGuard peers using 'wg set' — never touches [Interface] settings.

    wg syncconf resets the ListenPort when given a peers-only config,
    breaking client connections. We diff the config file against the live
    interface and add/remove peers individually instead.

    Fixes vs. original:
      - removed unused `re` from the local import
      - the live-peer parse accepted any dump line with >= 4 fields, which
        also matched the FIRST line of `wg show wg0 dump` (the interface
        line: private-key, public-key, listen-port, fwmark). That put the
        server's PRIVATE key into live_pubs and triggered a bogus
        `wg set ... peer <privkey> remove` each sync. Per wg(8), peer
        lines carry 8 tab-separated fields, so require >= 8.

    All failures are logged and swallowed — the config file is
    authoritative and will be re-applied on the next sync.
    """
    import subprocess
    try:
        # Parse desired peers from config file
        content = self._read_config()
        desired: dict = {}
        current_peer = None
        for line in content.splitlines():
            line = line.strip()
            if line == '[Peer]':
                current_peer = {}
            elif current_peer is not None:
                if line.startswith('PublicKey'):
                    current_peer['pub'] = line.split('=', 1)[1].strip()
                elif line.startswith('AllowedIPs'):
                    current_peer['ips'] = line.split('=', 1)[1].strip()
                elif line.startswith('PersistentKeepalive'):
                    current_peer['ka'] = line.split('=', 1)[1].strip()
                elif line == '' and 'pub' in current_peer:
                    # Blank line terminates a [Peer] block.
                    desired[current_peer['pub']] = current_peer
                    current_peer = None
        # The file may end without a trailing blank line — flush the last peer.
        if current_peer and 'pub' in current_peer:
            desired[current_peer['pub']] = current_peer

        # Get live peers
        dump = subprocess.run(
            ['docker', 'exec', 'cell-wireguard', 'wg', 'show', 'wg0', 'dump'],
            capture_output=True, text=True, timeout=5
        )
        live_pubs = set()
        for line in dump.stdout.splitlines():
            parts = line.split('\t')
            # Peer lines have 8 fields; the 4-field interface line is skipped.
            if len(parts) >= 8 and parts[0] not in ('(none)', ''):
                live_pubs.add(parts[0])

        # Remove peers no longer in config
        for pub in live_pubs - set(desired):
            subprocess.run(
                ['docker', 'exec', 'cell-wireguard', 'wg', 'set', 'wg0',
                 'peer', pub, 'remove'],
                capture_output=True, timeout=5
            )
            logger.info(f'wg: removed peer {pub[:16]}...')

        # Add/update peers in config
        for pub, p in desired.items():
            args = ['docker', 'exec', 'cell-wireguard', 'wg', 'set', 'wg0',
                    'peer', pub,
                    'allowed-ips', p.get('ips', ''),
                    'persistent-keepalive', p.get('ka', '25')]
            subprocess.run(args, capture_output=True, timeout=5)

        logger.info(f'wg set applied: {len(desired)} peers')
    except Exception as e:
        logger.warning(f'_syncconf failed (non-fatal): {e}')
|
||||
|
||||
# ── Peer CRUD ─────────────────────────────────────────────────────────────
|
||||
|
||||
def add_peer(self, name: str, public_key: str, endpoint_ip: str,
|
||||
allowed_ips: str = SERVER_NETWORK,
|
||||
persistent_keepalive: int = 25) -> bool:
|
||||
"""Add a [Peer] block to wg0.conf."""
|
||||
"""Add a [Peer] block to wg0.conf.
|
||||
|
||||
Server-side AllowedIPs must be the peer's specific VPN IP (/32).
|
||||
Passing full-tunnel or split-tunnel CIDRs here would cause the server
|
||||
to route all internet or LAN traffic to that peer — breaking everything.
|
||||
"""
|
||||
import ipaddress
|
||||
try:
|
||||
# Enforce /32: reject any CIDR wider than a single host
|
||||
for cidr in (c.strip() for c in allowed_ips.split(',')):
|
||||
try:
|
||||
net = ipaddress.ip_network(cidr, strict=False)
|
||||
if net.prefixlen < 32 and not cidr.endswith('/32'):
|
||||
raise ValueError(
|
||||
f"Server-side AllowedIPs must be a /32 host address, got '{cidr}'. "
|
||||
"Full/split tunnel CIDRs belong in the CLIENT config only."
|
||||
)
|
||||
except ValueError as ve:
|
||||
raise ve
|
||||
|
||||
content = self._read_config()
|
||||
peer_block = (
|
||||
f'\n[Peer]\n'
|
||||
|
||||
Reference in New Issue
Block a user