#!/usr/bin/env python3
"""
Personal Internet Cell API Server

Provides REST API endpoints for managing:
- Cell status and configuration
- Network services (DNS, DHCP, NTP)
- WireGuard VPN and peer management
- Email, Calendar, and File services
- Routing and VPN gateway
- Vault and trust management (Phase 6)
"""

import os
import io
import json
import stat
import zipfile
import shutil
import logging
import secrets
from datetime import datetime
from flask import Flask, request, jsonify, current_app, send_file, session
from flask_cors import CORS
import threading
import time
from collections import deque
import json as pyjson
from logging.handlers import RotatingFileHandler
import uuid
import contextvars

# Track API start time for uptime calculation
API_START_TIME = time.time()

# Manager singletons — all instantiated in managers.py; imported here so routes can
# reference them by module-level name and test patches (`patch('app.X', mock)`) work.
from managers import (
    config_manager, service_bus, log_manager, network_manager,
    wireguard_manager, peer_registry, email_manager, calendar_manager,
    file_manager, routing_manager, vault_manager, container_manager,
    cell_link_manager, auth_manager, firewall_manager, EventType,
)

# Re-exports: tests do `from app import CellManager` and `from app import _resolve_peer_dns`
from cell_manager import CellManager
from wireguard_manager import _resolve_peer_dns

from port_registry import PORT_FIELDS, detect_conflicts
import auth_routes

# Context variable for request info
request_context = contextvars.ContextVar('request_context', default={})

# Set default log level and log file if not already defined
LOG_LEVEL = globals().get('LOG_LEVEL', 'INFO')
LOG_FILE = globals().get('LOG_FILE', 'picell.log')


class ContextFilter(logging.Filter):
    def filter(self, record):
        ctx = request_context.get({})
        for k, v in ctx.items():
            setattr(record, k, v)
        return True


class JsonFormatter(logging.Formatter):
    def format(self, record):
        log_record = {
            'timestamp': self.formatTime(record, self.datefmt),
            'level': record.levelname,
            'name': record.name,
            'message': record.getMessage(),
            'request_id': getattr(record, 'request_id', None),
            'client_ip': getattr(record, 'client_ip', None),
            'method': getattr(record, 'method', None),
            'path': getattr(record, 'path', None),
            'status': getattr(record, 'status', None),
            'user': getattr(record, 'user', None),
        }
        if record.exc_info:
            log_record['exception'] = self.formatException(record.exc_info)
        return pyjson.dumps({k: v for k, v in log_record.items() if v is not None})


json_formatter = JsonFormatter()
context_filter = ContextFilter()

handlers = [logging.StreamHandler()]
try:
    file_handler = RotatingFileHandler(LOG_FILE, maxBytes=5_000_000, backupCount=5, encoding='utf-8')
    file_handler.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
    handlers.append(file_handler)
except Exception as e:
    print(f"Warning: Could not create rotating log file handler: {e}")

# Formatter and context filter are applied uniformly to every handler here
for h in handlers:
    h.setFormatter(json_formatter)
    h.addFilter(context_filter)

logging.basicConfig(
    level=getattr(logging, LOG_LEVEL, logging.INFO),
    handlers=handlers
)
logger = logging.getLogger('picell')

# Flask app setup
app = Flask(__name__)
CORS(app, supports_credentials=True,
     origins=['http://localhost', 'http://localhost:5173', 'http://localhost:8081',
              'http://127.0.0.1', 'http://127.0.0.1:5173', 'http://127.0.0.1:8081'])

# Development mode flag
app.config['DEVELOPMENT_MODE'] = True  # Set to True for development, False for production
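
# For orientation: once the request hooks below populate request_context, each
# log line rendered by JsonFormatter is one JSON object (values illustrative):
#   {"timestamp": "2024-01-01 12:00:00,000", "level": "INFO", "name": "picell",
#    "message": "GET /api/status 200", "request_id": "6f1c2e6a-...",
#    "client_ip": "172.20.0.5", "method": "GET", "path": "/api/status",
#    "status": 200, "user": "admin"}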

# Persist SECRET_KEY so sessions survive API restarts
SECRET_KEY_FILE = os.path.join(os.environ.get('DATA_DIR', '/app/data'), '.flask_secret_key')
if os.environ.get('SECRET_KEY'):
    _flask_secret = os.environ['SECRET_KEY'].encode() if isinstance(os.environ['SECRET_KEY'], str) else os.environ['SECRET_KEY']
elif os.path.exists(SECRET_KEY_FILE) and os.path.getsize(SECRET_KEY_FILE) > 0:
    with open(SECRET_KEY_FILE, 'rb') as _skf:
        _flask_secret = _skf.read()
else:
    _flask_secret = os.urandom(32)
    try:
        os.makedirs(os.path.dirname(SECRET_KEY_FILE), exist_ok=True)
        _skf_fd = os.open(SECRET_KEY_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(_skf_fd, 'wb') as _skf:
            _skf.write(_flask_secret)
    except OSError as _e:
        logger.warning(f"Could not persist SECRET_KEY to disk: {_e}")
app.config['SECRET_KEY'] = _flask_secret
app.config['SESSION_COOKIE_HTTPONLY'] = True
app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'

# config_manager, service_bus, log_manager and all other managers are imported
# from managers.py above — no re-instantiation needed here.


@app.before_request
def enrich_log_context():
    req_id = str(uuid.uuid4())
    client_ip = request.remote_addr
    method = request.method
    path = request.path
    user = getattr(getattr(request, 'user', None), 'id', None) or 'anonymous'
    request_context.set({
        'request_id': req_id,
        'client_ip': client_ip,
        'method': method,
        'path': path,
        'user': user
    })


@app.before_request
def enforce_auth():
    """Enforce session-based authentication and role-based access control.

    Rules:
    - /api/auth/* is always public (login, logout, me, change-password)
    - Non-/api/ paths (e.g. /health) are always public
    - /api/peer/* is accessible to the peer role only (admin gets 403)
    - All other /api/* routes require the admin role

    Enforcement is active when auth_manager is a real AuthManager instance
    with at least one registered user. Tests that do not seed the auth store
    will see an empty user list and bypass enforcement, preserving backward
    compatibility with pre-auth test suites.
    """
    path = request.path
    # Always allow non-API paths and the auth namespace
    if not path.startswith('/api/') or path.startswith('/api/auth/'):
        return None

    # Only enforce when auth_manager has been properly initialised and seeded.
    # When the user store is empty (file missing or unreadable — typical in
    # unit tests and fresh installs), bypass enforcement so pre-auth test
    # suites continue to work. 503 is only returned when the users file
    # exists and is readable but contains no accounts (explicit misconfiguration).
    try:
        from auth_manager import AuthManager as _AuthManager
        if not isinstance(auth_manager, _AuthManager):
            return None
        users = auth_manager.list_users()
        if not users:
            # Only fail closed when the auth file is readable but empty —
            # that's an explicit misconfiguration. If the file is missing or
            # unreadable (test env, wrong host path, permission denied), bypass
            # so pre-auth test suites continue to work.
            users_file = getattr(auth_manager, '_users_file', None)
            if users_file:
                try:
                    with open(users_file, 'r') as _f:
                        _f.read(1)
                    return jsonify({'error': 'Authentication not configured. Set admin password first.'}), 503
                except (PermissionError, FileNotFoundError, OSError):
                    return None
            return None
    except Exception:
        return None

    username = session.get('username')
    if not username:
        return jsonify({'error': 'Not authenticated'}), 401

    role = session.get('role')
    if path.startswith('/api/peer/'):
        if role != 'peer':
            return jsonify({'error': 'Forbidden'}), 403
    else:
        if role != 'admin':
            return jsonify({'error': 'Forbidden'}), 403
    return None


@app.before_request
def check_csrf():
    """Double-submit CSRF protection for state-changing API requests.

    Applies to POST/PUT/DELETE/PATCH on /api/* paths, excluding /api/auth/*.
    Skipped entirely when app.config['TESTING'] is True so unit tests remain
    unaffected without needing to set CSRF headers.
    """
    if app.config.get('TESTING'):
        return None
    if request.method not in ('POST', 'PUT', 'DELETE', 'PATCH'):
        return None
    path = request.path
    if not path.startswith('/api/') or path.startswith('/api/auth/'):
        return None
    token_session = session.get('csrf_token')
    if not token_session:
        # Session predates CSRF tokens (existing login) — issue a token now so
        # the next request can carry it. Don't block this request; the client
        # couldn't have known the token yet.
        session['csrf_token'] = secrets.token_hex(32)
        return None
    token_header = request.headers.get('X-CSRF-Token')
    if not token_header or token_header != token_session:
        return jsonify({'error': 'CSRF token missing or invalid'}), 403
    return None
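
# A minimal client sketch of the double-submit flow (illustrative; the exact
# response field that exposes the session's csrf_token lives in auth_routes,
# not in this file, so the field name below is an assumption):
#
#   import requests
#   s = requests.Session()
#   s.post('http://localhost:3000/api/auth/login',
#          json={'username': 'admin', 'password': '...'})
#   token = s.get('http://localhost:3000/api/auth/me').json().get('csrf_token')  # assumed field
#   s.put('http://localhost:3000/api/config',
#         json={'domain': 'home.lan'}, headers={'X-CSRF-Token': token})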


@app.after_request
def log_request(response):
    ctx = request_context.get({})
    ctx['status'] = response.status_code
    logger.info(f"{ctx.get('method')} {ctx.get('path')} {ctx.get('status')}")
    return response


@app.teardown_request
def clear_log_context(exc):
    request_context.set({})


# Wire vault_manager into Flask app context (vault routes use current_app.vault_manager)
app.vault_manager = vault_manager
auth_routes.auth_manager = auth_manager


# Apply firewall + DNS rules from stored peer settings (survives API restarts)
def _configured_domain() -> str:
    return config_manager.configs.get('_identity', {}).get('domain', 'cell')


def _apply_startup_enforcement():
    try:
        peers = peer_registry.list_peers()
        firewall_manager.apply_all_peer_rules(peers)
        firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain(),
                                             cell_links=cell_link_manager.list_connections())
        logger.info(f"Applied enforcement rules for {len(peers)} peers on startup")
    except Exception as e:
        logger.warning(f"Startup enforcement failed (non-fatal): {e}")


def _bootstrap_dns():
    try:
        identity = config_manager.configs.get('_identity', {})
        cell_name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        domain = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        ip_range = identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
        network_manager.bootstrap_dns_records(cell_name, domain, ip_range)
    except Exception as e:
        logger.warning(f"DNS bootstrap failed (non-fatal): {e}")


COREFILE_PATH = '/app/config/dns/Corefile'


def _recover_pending_apply():
    """If the previous all-services apply was interrupted (helper died mid-run),
    clear the 'applying' flag so the UI shows the changes as still pending (not
    stuck in an 'applying' spinner) and the user can retry."""
    try:
        pending = config_manager.configs.get('_pending_restart', {})
        if pending.get('applying') and pending.get('needs_restart'):
            config_manager.configs['_pending_restart']['applying'] = False
            config_manager._save_all_configs()
            logger.warning("Previous config apply did not complete — pending changes restored for retry")
    except Exception as e:
        logger.warning(f"Pending apply recovery check failed: {e}")


_recover_pending_apply()

# Run in background so startup isn't blocked waiting on docker exec
threading.Thread(target=_apply_startup_enforcement, daemon=True).start()
threading.Thread(target=_bootstrap_dns, daemon=True).start()

# Register services with service bus
service_bus.register_service('network', network_manager)
service_bus.register_service('wireguard', wireguard_manager)
service_bus.register_service('email', email_manager)
service_bus.register_service('calendar', calendar_manager)
service_bus.register_service('files', file_manager)
service_bus.register_service('routing', routing_manager)
service_bus.register_service('vault', app.vault_manager)
service_bus.register_service('container', container_manager)

# Register auth blueprint
app.register_blueprint(auth_routes.auth_bp)

# Register service blueprints (routes extracted from this file)
from routes.email import bp as _email_bp
from routes.calendar import bp as _calendar_bp
from routes.files import bp as _files_bp
from routes.network import bp as _network_bp
from routes.wireguard import bp as _wireguard_bp
from routes.cells import bp as _cells_bp
from routes.peers import bp as _peers_bp
from routes.routing import bp as _routing_bp
from routes.vault import bp as _vault_bp
from routes.containers import bp as _containers_bp
from routes.services import bp as _services_bp
from routes.peer_dashboard import bp as _peer_dashboard_bp

app.register_blueprint(_email_bp)
app.register_blueprint(_calendar_bp)
app.register_blueprint(_files_bp)
app.register_blueprint(_network_bp)
app.register_blueprint(_wireguard_bp)
app.register_blueprint(_cells_bp)
app.register_blueprint(_peers_bp)
app.register_blueprint(_routing_bp)
app.register_blueprint(_vault_bp)
app.register_blueprint(_containers_bp)
app.register_blueprint(_services_bp)
app.register_blueprint(_peer_dashboard_bp)

# Unified health monitoring
HEALTH_HISTORY_SIZE = 100
health_history = deque(maxlen=HEALTH_HISTORY_SIZE)
health_monitor_running = True

# Health alerting configuration
HEALTH_ALERT_THRESHOLD = 3  # Number of consecutive failures before alert
service_alert_counters = {}


def perform_health_check():
    """Perform a unified health check of all services, with alerting."""
    try:
        # Use service bus to get health from all services
        result = {
            'timestamp': datetime.utcnow().isoformat(),
            'alerts': []
        }
        # Get health from each service
        for service_name in service_bus.list_services():
            try:
                service = service_bus.get_service(service_name)
                if hasattr(service, 'health_check'):
                    health = service.health_check()
                else:
                    health = service.get_status()
                result[service_name] = health
            except Exception as e:
                result[service_name] = {'error': str(e), 'status': 'offline'}

        # Health alerting logic — alert only when a service container is not running
        global service_alert_counters
        for service_name in service_bus.list_services():
            if service_name in result:
                status = result[service_name]
                healthy = True
                if isinstance(status, dict):
                    # Prefer status.running (container actually up) over healthy (connectivity tests)
                    inner = status.get('status', {})
                    if isinstance(inner, dict):
                        if 'running' in inner:
                            healthy = inner['running']
                        elif 'status' in inner:
                            healthy = str(inner['status']).lower() in ('ok', 'healthy', 'online', 'active')
                    elif 'running' in status:
                        healthy = status['running']
                    elif 'error' in status:
                        healthy = False
                else:
                    healthy = bool(status)
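                # Status shapes the chain above classifies (illustrative):
                #   {'status': {'running': True}}           → healthy (container up)
                #   {'status': {'status': 'OK'}}            → healthy (status keyword)
                #   {'running': False, 'status': 'active'}  → unhealthy (top-level running)
                #   {'status': 'offline', 'error': '...'}   → unhealthy (error reported)
                #   non-dict values                         → healthy iff truthy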
                # Only count as unhealthy if we're certain it's down
                if not healthy:
                    service_alert_counters[service_name] = service_alert_counters.get(service_name, 0) + 1
                    if service_alert_counters[service_name] >= HEALTH_ALERT_THRESHOLD:
                        alert_msg = f"ALERT: {service_name} unhealthy for {service_alert_counters[service_name]} consecutive checks."
                        logger.warning(alert_msg)
                        result['alerts'].append(alert_msg)
                        # Publish alert event
                        service_bus.publish_event(EventType.ERROR_OCCURRED, service_name, {
                            'error': alert_msg,
                            'service': service_name,
                            'consecutive_failures': service_alert_counters[service_name]
                        })
                else:
                    # Reset counter if service is healthy
                    if service_alert_counters.get(service_name, 0) > 0:
                        logger.info(f"Service {service_name} recovered, resetting alert counter")
                        service_alert_counters[service_name] = 0

        logger.info(f"Unified health check: {result}")
        return result
    except Exception as e:
        logger.error(f"Unified health check failed: {e}")
        return {'error': str(e), 'timestamp': datetime.utcnow().isoformat()}


def health_monitor_loop():
    while health_monitor_running:
        with app.app_context():
            health_result = perform_health_check()
            health_history.appendleft(health_result)
            # Publish health check event
            service_bus.publish_event(EventType.HEALTH_CHECK, 'api', health_result)
        time.sleep(60)  # Check every 60 seconds


# Start health monitor thread
health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True)
health_monitor_thread.start()


def _local_subnets():
    """Return all subnets the container is directly connected to (from routing table)."""
    import ipaddress as _ipa, socket as _sock, struct as _struct
    nets = []
    try:
        with open('/proc/net/route') as _f:
            for _line in _f.readlines()[1:]:
                _parts = _line.strip().split()
                if len(_parts) < 8 or _parts[0] == 'lo':
                    continue
                # Destination (field 1) and netmask (field 7) are little-endian hex
                # (standard /proc/net/route layout; this parsing is a reconstruction
                # of a span lost to extraction damage)
                _dest = _sock.inet_ntoa(_struct.pack('<I', int(_parts[1], 16)))
                _mask = _sock.inet_ntoa(_struct.pack('<I', int(_parts[7], 16)))
                if _dest == '0.0.0.0':
                    continue  # default route, not a directly connected subnet
                nets.append(_ipa.ip_network(f'{_dest}/{_mask}', strict=False))
    except Exception:
        pass
    return nets


def _is_local_request():  # helper name assumed — the original was lost in extraction
    # Trust model: Caddy (the trusted reverse proxy) appends the real client IP
    # as the LAST X-Forwarded-For entry, so a request arriving with a forged
    # "X-Forwarded-For: ..." still passes because the last entry is the
    # real IP appended by Caddy. An attacker directly hitting Flask on :3000
    # could craft any XFF they like, but in the Docker topology port 3000 is
    # not exposed to the internet.
    remote_addr = request.remote_addr

    def _allowed(addr):
        if not addr:
            return False
        if addr in ('127.0.0.1', '::1', 'localhost'):
            return True
        try:
            import ipaddress as _ipa
            ip = _ipa.ip_address(addr.strip())
            if ip.is_loopback:
                return True
            # Only trust loopback and Docker bridge (172.16.0.0/12).
            # Deliberately excludes 10.0.0.0/8 (WireGuard peer subnet) and
            # 192.168.0.0/16 (LAN) — VPN peers must not access local-only endpoints.
            if ip in _ipa.ip_network('172.16.0.0/12'):
                return True
            # Any subnet the container is directly attached to (handles non-RFC-1918
            # Docker bridge networks such as 172.0.0.0/24).
            for _net in _local_subnets():
                if ip in _net:
                    return True
        except Exception:
            pass
        return False

    if _allowed(remote_addr):
        return True

    # Check the last X-Forwarded-For entry (appended by the trusted proxy).
    # Never trust any entry other than the last one.
    try:
        xff = request.headers.get('X-Forwarded-For', '')
        if xff:
            last_ip = xff.split(',')[-1].strip()
            if last_ip and _allowed(last_ip):
                return True
    except Exception:
        pass
    return False


@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint."""
    try:
        return jsonify({
            "status": "healthy",
            "timestamp": datetime.utcnow().isoformat(),
            "version": "1.0.0"
        })
    except Exception as e:
        logger.error(f"Health check failed: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/status', methods=['GET'])
def get_cell_status():
    """Get overall cell status."""
    try:
        # Use service bus to get status from all services
        services_status = {}
        for service_name in service_bus.list_services():
            try:
                service = service_bus.get_service(service_name)
                services_status[service_name] = service.get_status()
            except Exception as e:
                services_status[service_name] = {'error': str(e)}

        peers = peer_registry.list_peers()

        # Calculate actual uptime
        current_time = time.time()
        uptime_seconds = int(current_time - API_START_TIME)

        identity = config_manager.configs.get('_identity', {})
        return jsonify({
            "cell_name": identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            "domain": identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
            "uptime": uptime_seconds,
            "peers_count": len(peers),
            "services": services_status,
            "timestamp": datetime.utcnow().isoformat()
        })
    except Exception as e:
        logger.error(f"Error getting cell status: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/config', methods=['GET'])
def get_config():
    """Get cell configuration."""
    try:
        service_configs = config_manager.get_all_configs()
        identity = service_configs.pop('_identity', {})
        config = {
            'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
            'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
            'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
        }
        # Expose computed per-service IPs so the frontend doesn't need to derive them
        import ip_utils as _ip_utils_cfg
        _ips = _ip_utils_cfg.get_service_ips(config['ip_range'])
        config['service_ips'] = {
            'dns': _ips['dns'],
            'vip_mail': _ips['vip_mail'],
            'vip_calendar': _ips['vip_calendar'],
            'vip_files': _ips['vip_files'],
            'vip_webdav': _ips['vip_webdav'],
        }
        config['service_configs'] = service_configs
        return jsonify(config)
    except Exception as e:
        logger.error(f"Error getting config: {e}")
        return jsonify({"error": str(e)}), 500
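
# Shape of a GET /api/config response, derived from the handler above
# (illustrative values; requires an authenticated admin session cookie):
#   $ curl -s -b cookies.txt http://localhost:3000/api/config
#   {"cell_name": "mycell", "domain": "cell", "ip_range": "172.20.0.0/16",
#    "wireguard_port": 51820,
#    "service_ips": {"dns": "...", "vip_mail": "...", "vip_calendar": "...",
#                    "vip_files": "...", "vip_webdav": "..."},
#    "service_configs": {"network": {...}, "wireguard": {...}, ...}}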


@app.route('/api/config', methods=['PUT'])
def update_config():
    """Update cell configuration."""
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400

        # Handle identity fields (cell_name, domain, ip_range, wireguard_port)
        identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
        identity_updates = {k: v for k, v in data.items() if k in identity_keys}

        # Validate cell_name and domain — block injection characters while
        # allowing the full range of valid hostname/domain characters.
        import re as _re_cfg
        # cell_name: hostname component — letters, digits, hyphens only (no dots)
        _CELL_NAME_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$')
        # domain: may include dots for multi-label names (e.g. home.lan)
        _DOMAIN_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$')
        if 'cell_name' in identity_updates:
            v = str(identity_updates['cell_name'])
            if not v:
                return jsonify({'error': 'cell_name cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
            if not _CELL_NAME_RE.match(v):
                return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400
        if 'domain' in identity_updates:
            v = str(identity_updates['domain'])
            if not v:
                return jsonify({'error': 'domain cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
            if not _DOMAIN_RE.match(v):
                return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400

        # Validate ip_range — must be a valid CIDR within an RFC-1918 range
        if 'ip_range' in identity_updates:
            import ipaddress as _ipa
            _rfc1918 = [
                _ipa.ip_network('10.0.0.0/8'),
                _ipa.ip_network('172.16.0.0/12'),
                _ipa.ip_network('192.168.0.0/16'),
            ]
            try:
                _raw = str(identity_updates['ip_range'])
                if '/' not in _raw:
                    return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
                _net = _ipa.ip_network(_raw, strict=False)
                if not any(_net.subnet_of(r) for r in _rfc1918):
                    return jsonify({'error': (
                        'ip_range must be within an RFC-1918 private range '
                        '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
                    )}), 400
            except ValueError as _e:
                return jsonify({'error': f'Invalid ip_range: {_e}'}), 400
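        # The RFC-1918 guard above hinges on ipaddress.subnet_of(), e.g.:
        #   >>> import ipaddress
        #   >>> ipaddress.ip_network('172.20.0.0/16').subnet_of(
        #   ...     ipaddress.ip_network('172.16.0.0/12'))
        #   True
        #   >>> ipaddress.ip_network('8.8.8.0/24').subnet_of(
        #   ...     ipaddress.ip_network('10.0.0.0/8'))
        #   False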

        # Validate service config port and IP fields
        _port_fields = {
            'network': ['dns_port'],
            'wireguard': ['port'],
            'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
            'calendar': ['port'],
            'files': ['port', 'manager_port'],
        }
        for _svc, _fields in _port_fields.items():
            if _svc not in data:
                continue
            _svc_data = data[_svc]
            if not isinstance(_svc_data, dict):
                continue
            for _f in _fields:
                if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
                    try:
                        _p = int(_svc_data[_f])
                        if not (1 <= _p <= 65535):
                            raise ValueError()
                    except (ValueError, TypeError):
                        return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400

        # Validate that no two service sections use the same port number
        _conflicts = detect_conflicts(config_manager.configs, data)
        if _conflicts:
            _msgs = []
            for _c in _conflicts:
                _pairs = ', '.join(f"{_s}.{_f}" for _s, _f in _c['conflicts'])
                _msgs.append(f"port {_c['port']} is used by {_pairs}")
            return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409

        # Validate WireGuard address (must be valid IP/CIDR)
        if 'wireguard' in data and isinstance(data['wireguard'], dict):
            _addr = data['wireguard'].get('address')
            if _addr:
                import ipaddress as _ipa2
                if '/' not in str(_addr):
                    return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
                try:
                    _ipa2.ip_interface(_addr)
                except ValueError as _e:
                    return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400

        # Capture old identity and service configs BEFORE saving, for change detection + revert
        import copy as _copy
        old_identity = dict(config_manager.configs.get('_identity', {}))
        old_svc_configs = {
            svc: dict(config_manager.configs.get(svc, {}))
            for svc in data if svc in config_manager.service_schemas
        }
        # Full pre-change snapshot — used by Discard to revert to original state.
        # Must be captured here, before any config writes, so it holds the true old values.
        _pre_change_snapshot = {k: _copy.deepcopy(v) for k, v in config_manager.configs.items()
                                if not k.startswith('_')}
        _pre_change_snapshot['_identity'] = _copy.deepcopy(config_manager.configs.get('_identity', {}))

        if identity_updates:
            stored = config_manager.configs.get('_identity', {})
            stored.update(identity_updates)
            config_manager.configs['_identity'] = stored
            config_manager._save_all_configs()

        # Map service names to their manager instances
        _svc_managers = {
            'network': network_manager,
            'wireguard': wireguard_manager,
            'email': email_manager,
            'calendar': calendar_manager,
            'files': file_manager,
            'routing': routing_manager,
            'vault': app.vault_manager,
        }

        all_restarted = []
        all_warnings = []

        # Update service configurations: persist + apply to real config files
        for service, config in data.items():
            if service in config_manager.service_schemas:
                config_manager.update_service_config(service, config)
                mgr = _svc_managers.get(service)
                if mgr:
                    mgr.update_config(config)
                    result = mgr.apply_config(config)
                    all_restarted.extend(result.get('restarted', []))
                    all_warnings.extend(result.get('warnings', []))
                service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
                    'service': service,
                    'config': config
                })
                # VPN port or subnet change → all peer client configs are stale
                if service == 'wireguard' and ('port' in config or 'address' in config):
                    for p in peer_registry.list_peers():
                        peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
                    n = len(peer_registry.list_peers())
                    if n:
                        all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
                    # Keep identity.wireguard_port in sync with service config port
                    if 'port' in config:
                        _id = config_manager.configs.get('_identity', {})
                        _id['wireguard_port'] = config['port']
                        config_manager.configs['_identity'] = _id
                        config_manager._save_all_configs()

        # Apply cell identity domain to network and email services (write files, defer reload)
        if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
            domain = identity_updates['domain']
            net_result = network_manager.apply_domain(domain, reload=False)
            all_warnings.extend(net_result.get('warnings', []))
            # Regenerate Caddyfile — virtual host names change with the domain
            import ip_utils as _ip_domain
            _cur_id = config_manager.configs.get('_identity', {})
            _cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
            _cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            _ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config-caddy/Caddyfile')
            _set_pending_restart(
                [f'domain changed to {domain}'],
                ['dns', 'caddy'],
                pre_change_snapshot=_pre_change_snapshot,
            )

        # Apply cell name change to DNS hostname record (write files, defer reload)
        if identity_updates.get('cell_name'):
            old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            new_name = identity_updates['cell_name']
            if old_name != new_name:
                cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
                all_warnings.extend(cn_result.get('warnings', []))
                # Regenerate Caddyfile — main virtual host name changes with cell_name
                import ip_utils as _ip_name
                _cur_id2 = config_manager.configs.get('_identity', {})
                _cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
                _cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
                _ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config-caddy/Caddyfile')
                _set_pending_restart(
                    [f'cell_name changed to {new_name}'],
                    ['dns'],
                    pre_change_snapshot=_pre_change_snapshot,
                )

        # Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules
        if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
            import ip_utils
            new_range = identity_updates['ip_range']
            cur_identity = config_manager.configs.get('_identity', {})
            cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
            # Update DNS zone records immediately
            ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
            all_restarted.extend(ip_result.get('restarted', []))
            all_warnings.extend(ip_result.get('warnings', []))
            # Update firewall virtual IPs (iptables) and Caddy virtual IPs immediately
            firewall_manager.update_service_ips(new_range)
            firewall_manager.ensure_caddy_virtual_ips()
            # Write new .env with updated IPs (and current ports) for next container start
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
            # Regenerate Caddyfile with new VIPs
            ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, '/app/config-caddy/Caddyfile')
            # Mark ALL containers as needing restart; network_recreate signals that
            # docker compose down is required before up (Docker can't change subnet in-place)
            _set_pending_restart(
                [f'ip_range changed to {new_range} — network will be recreated'],
                ['*'],
                network_recreate=True,
                pre_change_snapshot=_pre_change_snapshot,
            )

        # Detect port changes across service configs and identity
        # Maps (service_key, field_name) → (port_env_key, [containers])
        _PORT_CHANGE_MAP = {
            ('network',   'dns_port'):        ('dns_port',             ['dns']),
            ('wireguard', 'port'):            ('wg_port',              ['wireguard']),
            ('email',     'smtp_port'):       ('mail_smtp_port',       ['mail']),
            ('email',     'submission_port'): ('mail_submission_port', ['mail']),
            ('email',     'imap_port'):       ('mail_imap_port',       ['mail']),
            ('email',     'webmail_port'):    ('rainloop_port',        ['rainloop']),
            ('calendar',  'port'):            ('radicale_port',        ['radicale']),
            ('files',     'port'):            ('webdav_port',          ['webdav']),
            ('files',     'manager_port'):    ('filegator_port',       ['filegator']),
        }
        port_changed_containers = set()
        port_change_messages = []
        import ip_utils as _ip_utils_pcd
        for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
            if svc_key in data and field in data[svc_key]:
                default_val = _ip_utils_pcd.PORT_DEFAULTS.get(_env_key)
                old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
                new_val = data[svc_key][field]
                if old_val != new_val:
                    port_changed_containers.update(containers)
                    port_change_messages.append(
                        f'{svc_key} {field}: {old_val} → {new_val}'
                    )

        # wireguard_port in identity also drives WG_PORT env var; sync to service config
        if 'wireguard_port' in identity_updates:
            old_wg = old_identity.get('wireguard_port', _ip_utils_pcd.PORT_DEFAULTS.get('wg_port', 51820))
            new_wg = identity_updates['wireguard_port']
            if old_wg != new_wg:
                # Sync to wireguard service config and update wg0.conf
                _wg_svc = config_manager.configs.get('wireguard', {})
                _wg_svc['port'] = new_wg
                config_manager.update_service_config('wireguard', _wg_svc)
                wireguard_manager.apply_config({'port': new_wg})
                port_changed_containers.add('wireguard')
                port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}')

        if port_changed_containers:
            import ip_utils as _ip_utils_ports
            _env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            _ip_range = config_manager.configs.get('_identity', {}).get(
                'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
            )
            _ip_utils_ports.write_env_file(
                _ip_range, _env_file, _collect_service_ports(config_manager.configs)
            )
            _set_pending_restart(port_change_messages, list(port_changed_containers),
                                 pre_change_snapshot=_pre_change_snapshot)

        logger.info(f"Updated config, restarted: {all_restarted}")
        return jsonify({
            "message": "Configuration updated and applied",
            "restarted": all_restarted,
            "warnings": all_warnings,
        })
    except Exception as e:
        logger.error(f"Error updating config: {e}")
        return jsonify({"error": str(e)}), 500


# ---------------------------------------------------------------------------
# Pending-restart helpers
# ---------------------------------------------------------------------------

def _collect_service_ports(configs: dict) -> dict:
    """Extract current port values from service configs for .env generation."""
    ports = {}
    net = configs.get('network', {})
    wg = configs.get('wireguard', {})
    email = configs.get('email', {})
    cal = configs.get('calendar', {})
    files = configs.get('files', {})
    identity = configs.get('_identity', {})
    if 'dns_port' in net:
        ports['dns_port'] = net['dns_port']
    if 'port' in wg:
        ports['wg_port'] = wg['port']
    elif 'wireguard_port' in identity:
        ports['wg_port'] = identity['wireguard_port']
    if 'smtp_port' in email:
        ports['mail_smtp_port'] = email['smtp_port']
    if 'submission_port' in email:
        ports['mail_submission_port'] = email['submission_port']
    if 'imap_port' in email:
        ports['mail_imap_port'] = email['imap_port']
    if 'webmail_port' in email:
        ports['rainloop_port'] = email['webmail_port']
    if 'port' in cal:
        ports['radicale_port'] = cal['port']
    if 'port' in files:
        ports['webdav_port'] = files['port']
    if 'manager_port' in files:
        ports['filegator_port'] = files['manager_port']
    return ports


def _dedup_changes(existing: list, new: list) -> list:
    """Merge change lists, keeping only the latest entry per config key."""
    def key_of(msg: str) -> str:
        # "ip_range changed to X" → "ip_range"
        if ' changed' in msg:
            return msg.split(' changed')[0].strip()
        # "network dns_port: 52 → 53" → "network dns_port"
        if ':' in msg:
            return msg.split(':')[0].strip()
        return msg
    merged = {key_of(c): c for c in existing}
    merged.update({key_of(c): c for c in new})
    return list(merged.values())
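
# Merge semantics of _dedup_changes, illustrated (not executed at import):
#   _dedup_changes(['network dns_port: 53 → 5353'],
#                  ['network dns_port: 5353 → 53'])
#   → ['network dns_port: 5353 → 53']       # same key: latest entry wins
#   _dedup_changes(['domain changed to home.lan'],
#                  ['ip_range changed to 10.9.0.0/16'])
#   → both entries kept                     # different keys accumulate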


def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
                         pre_change_snapshot: dict = None):
    """Record that specific containers need to be restarted to apply configuration.

    containers: list of docker-compose service names, or None/'*' to restart all.
    network_recreate: True when the Docker bridge subnet changed (requires down+up).
    pre_change_snapshot: full config captured BEFORE this save (for Discard to revert).

    Merges with any existing pending state so multiple changes accumulate.
    """
    from datetime import datetime as _dt
    existing = config_manager.configs.get('_pending_restart', {})
    existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
    existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []
    # Keep the oldest snapshot (the true pre-change state). Never overwrite it with a
    # later snapshot — subsequent changes while pending should still revert to origin.
    if not existing.get('needs_restart'):
        snapshot = pre_change_snapshot or {}
    else:
        snapshot = existing.get('_snapshot', {})
    if containers is None or '*' in (containers or []) or existing_containers == ['*']:
        new_containers = ['*']
    else:
        new_containers = list(set(existing_containers) | set(containers))
    config_manager.configs['_pending_restart'] = {
        'needs_restart': True,
        'changed_at': _dt.utcnow().isoformat(),
        'changes': _dedup_changes(existing_changes, changes),
        'containers': new_containers,
        'network_recreate': network_recreate or existing.get('network_recreate', False),
        '_snapshot': snapshot,
    }
    config_manager._save_all_configs()


def _clear_pending_restart():
    config_manager.configs['_pending_restart'] = {
        'needs_restart': False,
        'changes': [],
        'containers': [],
        'network_recreate': False
    }
    config_manager._save_all_configs()


@app.route('/api/config/pending', methods=['GET'])
def get_pending_config():
    """Return whether there are unapplied configuration changes that require a restart."""
    pending = config_manager.configs.get('_pending_restart', {})
    return jsonify({
        'needs_restart': pending.get('needs_restart', False),
        'applying': pending.get('applying', False),
        'changed_at': pending.get('changed_at'),
        'changes': pending.get('changes', []),
        'containers': pending.get('containers', ['*']),
    })


@app.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
    """Discard pending configuration changes and restore config to the pre-change snapshot."""
    pending = config_manager.configs.get('_pending_restart', {})
    snapshot = pending.get('_snapshot', {})
    if snapshot:
        # Capture current (changed) identity before reverting, to rewrite config files
        cur_identity = dict(config_manager.configs.get('_identity', {}))
        old_identity = snapshot.get('_identity', {})
        # Restore config values from snapshot
        for k, v in snapshot.items():
            config_manager.configs[k] = v
        # Rewrite DNS/Caddy config files back to old values so they match restored config
        import ip_utils as _ip_revert
        _id = config_manager.configs.get('_identity', {})
        _range = _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
        _cell = _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        cur_domain = cur_identity.get('domain', '')
        old_domain = old_identity.get('domain', '')
        if cur_domain and old_domain and cur_domain != old_domain:
            network_manager.apply_domain(old_domain, reload=False)
        cur_cell_name = cur_identity.get('cell_name', '')
        old_cell_name = old_identity.get('cell_name', '')
        if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
            network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)
        _ip_revert.write_caddyfile(_range, _cell, _dom, '/app/config-caddy/Caddyfile')
    _clear_pending_restart()
    return jsonify({'message': 'Pending changes discarded'})
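
# The pending-restart lifecycle end to end (illustrative; assumes an
# authenticated admin session and an X-CSRF-Token header on mutations):
#   PUT    /api/config           {"domain": "home.lan"}  → change staged, files written
#   GET    /api/config/pending                           → {"needs_restart": true, ...}
#   POST   /api/config/apply                             → containers restarted
#   DELETE /api/config/pending                           → or discard and revert instead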


@app.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
    """Apply pending configuration by restarting containers via docker compose up -d."""
    try:
        pending = config_manager.configs.get('_pending_restart', {})
        if not pending.get('needs_restart'):
            return jsonify({'message': 'No pending changes to apply'})

        # Get project working dir, image name, and data-dir host path from our container labels/mounts
        project_dir = '/home/roof/pic'
        api_image = 'pic_api:latest'  # fallback (docker-compose v1 naming)
        data_host_path = '/home/roof/pic/data/api'  # fallback
        try:
            import docker as _docker_sdk
            _client = _docker_sdk.from_env()
            _self = _client.containers.get('cell-api')
            project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
            # Use the actual image tag so the helper works regardless of compose version
            # (docker-compose v1 builds pic_api:latest, compose v2+ builds pic-api:latest)
            tags = _self.image.tags
            if tags:
                api_image = tags[0]
            # Find the host-side path for /app/data so the helper can clear the pending flag
            for _m in _self.attrs.get('Mounts', []):
                if _m.get('Destination') == '/app/data':
                    data_host_path = _m.get('Source', data_host_path)
                    break
        except Exception:
            pass

        containers = pending.get('containers', ['*'])
        # Check if the IP range (network subnet) is changing — Docker cannot modify an
        # existing network's subnet in-place, so we need `down` + `up` in that case.
        needs_network_recreate = pending.get('network_recreate', False)
        host_env = os.path.join(project_dir, '.env')
        host_compose = os.path.join(project_dir, 'docker-compose.yml')

        if '*' in containers:
            # All-services restart: `docker compose down` or `up -d` may stop/recreate the
            # API container itself, killing this background thread mid-operation.
            # Spawn an independent helper container (same image as cell-api) that has docker
            # CLI and survives cell-api being stopped/recreated.
            #
            # Mark as "applying" rather than clearing early. The helper clears the flag on
            # success by writing to cell_config.json directly (via the /app/data mount).
            # If the helper fails, needs_restart stays True so the UI continues showing
            # pending changes after the API restarts. On the next startup, if "applying"
            # is still set, _recover_pending_apply() resets it so the user can retry.
            config_manager.configs['_pending_restart']['applying'] = True
            config_manager._save_all_configs()

            # Encode the clear script in base64 to avoid shell-quoting issues.
            import base64 as _b64
            _clear_py = (
                "import json,os; p='/app/data/cell_config.json';"
                "f=open(p); d=json.load(f); f.close();"
                "d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};"
                "tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)"
            )
            _b64_cmd = _b64.b64encode(_clear_py.encode()).decode()
            clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\""
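            # Round-trip sanity check for the base64 trick (illustrative):
            #   >>> import base64
            #   >>> base64.b64decode(base64.b64encode(_clear_py.encode())).decode() == _clear_py
            #   True
            # The embedded payload only ever contains [A-Za-z0-9+/=], so no quoting
            # inside _clear_py can break the surrounding shell command.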

            if needs_network_recreate:
                helper_script = (
                    f'sleep 2'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} down'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )
            else:
                helper_script = (
                    f'sleep 2'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )

            def _do_apply():
                import subprocess as _subprocess
                _subprocess.Popen(
                    ['docker', 'run', '--rm',
                     '-v', '/var/run/docker.sock:/var/run/docker.sock',
                     '-v', f'{project_dir}:{project_dir}',
                     '-v', f'{data_host_path}:/app/data',
                     '--entrypoint', 'sh', api_image, '-c', helper_script],
                    close_fds=True, stdout=_subprocess.DEVNULL, stderr=_subprocess.DEVNULL,
                )
                logger.info(
                    'spawned helper container for all-services restart'
                    + (' (network_recreate)' if needs_network_recreate else '')
                )
        else:
            # Specific containers only — API is not affected, run directly from here.
            # Only clear the pending flag after the subprocess exits with code 0 so that
            # if the compose command fails the UI still shows changes as pending.
            def _do_apply():
                import time as _time
                import subprocess as _subprocess
                _time.sleep(0.3)
                result = _subprocess.run(
                    ['docker', 'compose', '--project-directory', project_dir,
                     '-f', '/app/docker-compose.yml', '--env-file', '/app/.env.compose',
                     'up', '-d', '--no-deps', '--force-recreate'] + containers,
                    capture_output=True, text=True, timeout=120,
                )
                if result.returncode != 0:
                    logger.error(f"docker compose up failed: {result.stderr.strip()}")
                else:
                    logger.info(f'docker compose up completed for: {containers}')
                    _clear_pending_restart()

        threading.Thread(target=_do_apply, daemon=False).start()
        return jsonify({
            'message': 'Applying configuration — containers are restarting',
            'restart_in_progress': True,
        })
    except Exception as e:
        logger.error(f"Error applying config: {e}")
        return jsonify({'error': str(e)}), 500


# Configuration management endpoints
@app.route('/api/config/backup', methods=['POST'])
def create_config_backup():
    """Create a configuration backup."""
    try:
        backup_id = config_manager.backup_config()
        service_bus.publish_event(EventType.BACKUP_CREATED, 'api', {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat()
        })
        return jsonify({"backup_id": backup_id})
    except Exception as e:
        logger.error(f"Error creating backup: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/config/backups', methods=['GET'])
def list_config_backups():
    """List available backups."""
    try:
        backups = config_manager.list_backups()
        return jsonify(backups)
    except Exception as e:
        logger.error(f"Error listing backups: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
    """Restore configuration from a backup. Body may contain {services: [...]} for selective restore."""
    try:
        data = request.get_json(silent=True) or {}
        services = data.get('services')  # None = full restore
        success = config_manager.restore_config(backup_id, services=services)
        if success:
            service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
                'backup_id': backup_id,
                'timestamp': datetime.utcnow().isoformat()
            })
            return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
        else:
            return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
    except Exception as e:
        logger.error(f"Error restoring backup: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/config/export', methods=['GET'])
def export_config():
    """Export configuration."""
    try:
        format = request.args.get('format', 'json')
        config_data = config_manager.export_config(format)
        return jsonify({"config": config_data, "format": format})
    except Exception as e:
        logger.error(f"Error exporting config: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/config/import', methods=['POST'])
def import_config():
    """Import configuration."""
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        config_data = data.get('config')
        format = data.get('format', 'json')
        success = config_manager.import_config(config_data, format)
        if success:
            return jsonify({"message": "Configuration imported successfully"})
        else:
            return jsonify({"error": "Failed to import configuration"}), 500
    except Exception as e:
        logger.error(f"Error importing config: {e}")
        return jsonify({"error": str(e)}), 500
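
# Backup round-trip from the CLI (illustrative; assumes an authenticated
# session cookie and, for POSTs, a valid X-CSRF-Token header):
#   $ curl -s -X POST http://localhost:3000/api/config/backup     # → {"backup_id": "..."}
#   $ curl -s http://localhost:3000/api/config/backups            # list available backups
#   $ curl -s -o cell-backup.zip \
#       http://localhost:3000/api/config/backups/<id>/download    # fetch as zip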


@app.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
    """Download a backup as a zip file."""
    try:
        from pathlib import Path
        backup_path = config_manager.backup_dir / backup_id
        if not backup_path.exists():
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
            for f in backup_path.rglob('*'):
                if f.is_file():
                    zf.write(f, f.relative_to(backup_path))
        buf.seek(0)
        return send_file(buf, mimetype='application/zip', as_attachment=True,
                         download_name=f'{backup_id}.zip')
    except Exception as e:
        logger.error(f"Error downloading backup: {e}")
        return jsonify({'error': str(e)}), 500


@app.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
    """Upload a backup zip file."""
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file provided'}), 400
        f = request.files['file']
        filename = f.filename or ''
        if filename.endswith('.zip'):
            backup_id = filename[:-4]
        else:
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
        backup_path = config_manager.backup_dir / backup_id
        backup_path.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
                zf.extractall(backup_path)
        except zipfile.BadZipFile:
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid zip file'}), 400
        if not (backup_path / 'manifest.json').exists():
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
        return jsonify({'backup_id': backup_id})
    except Exception as e:
        logger.error(f"Error uploading backup: {e}")
        return jsonify({'error': str(e)}), 500


@app.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
    """Delete a configuration backup."""
    try:
        success = config_manager.delete_backup(backup_id)
        if success:
            return jsonify({"message": f"Backup {backup_id} deleted"})
        else:
            return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
    except Exception as e:
        logger.error(f"Error deleting backup: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/health/history', methods=['GET'])
def get_health_history():
    """Get recent unified health check results."""
    return jsonify(list(health_history))


@app.route('/api/health/history/clear', methods=['POST'])
def clear_health_history():
    """Clear health history and reset alert counters."""
    global service_alert_counters
    health_history.clear()
    service_alert_counters = {}
    return jsonify({'message': 'Health history cleared'})


if __name__ == '__main__':
    debug = os.environ.get('FLASK_DEBUG', '0') == '1'
    app.run(host='0.0.0.0', port=3000, debug=debug)
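
# Quick smoke test once the server is listening (illustrative):
#   $ curl -s http://localhost:3000/health
#   {"status": "healthy", "timestamp": "...", "version": "1.0.0"}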