diff --git a/api/app.py b/api/app.py index 656b7d5..e94510f 100644 --- a/api/app.py +++ b/api/app.py @@ -33,23 +33,20 @@ import contextvars # Track API start time for uptime calculation API_START_TIME = time.time() -from network_manager import NetworkManager -from wireguard_manager import WireGuardManager, _resolve_peer_dns -from peer_registry import PeerRegistry -from email_manager import EmailManager -from calendar_manager import CalendarManager -from file_manager import FileManager -from routing_manager import RoutingManager +# Manager singletons — all instantiated in managers.py; imported here so routes can +# reference them by module-level name and test patches (`patch('app.X', mock)`) work. +from managers import ( + config_manager, service_bus, log_manager, + network_manager, wireguard_manager, peer_registry, + email_manager, calendar_manager, file_manager, + routing_manager, vault_manager, container_manager, + cell_link_manager, auth_manager, + firewall_manager, EventType, +) +# Re-exports: tests do `from app import CellManager` and `from app import _resolve_peer_dns` from cell_manager import CellManager -from vault_manager import VaultManager -from container_manager import ContainerManager -from config_manager import ConfigManager -from service_bus import ServiceBus, EventType -from log_manager import LogManager -from cell_link_manager import CellLinkManager -import firewall_manager +from wireguard_manager import _resolve_peer_dns from port_registry import PORT_FIELDS, detect_conflicts -from auth_manager import AuthManager import auth_routes # Context variable for request info @@ -137,41 +134,8 @@ app.config['SECRET_KEY'] = _flask_secret app.config['SESSION_COOKIE_HTTPONLY'] = True app.config['SESSION_COOKIE_SAMESITE'] = 'Lax' -# Initialize enhanced components -config_manager = ConfigManager( - config_file=os.path.join(os.environ.get('CONFIG_DIR', '/app/config'), 'cell_config.json'), - data_dir=os.environ.get('DATA_DIR', '/app/data'), -) -service_bus = 
ServiceBus() -log_manager = LogManager(log_dir='./data/logs') - -# Initialize service loggers -service_log_configs = { - 'network': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'wireguard': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'email': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'calendar': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'files': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'routing': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'vault': {'level': 'INFO', 'formatter': 'json', 'console': False}, - 'api': {'level': 'INFO', 'formatter': 'json', 'console': True} -} - -for service, config in service_log_configs.items(): - log_manager.add_service_logger(service, config) - -# Apply any persisted log level overrides -_levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json') -if os.path.exists(_levels_file): - try: - with open(_levels_file) as _f: - for _svc, _lvl in json.load(_f).items(): - log_manager.set_service_level(_svc, _lvl) - except Exception: - pass - -# Start service bus -service_bus.start() +# config_manager, service_bus, log_manager and all other managers are imported +# from managers.py above — no re-instantiation needed here. 
@app.before_request def enrich_log_context(): @@ -285,24 +249,8 @@ def log_request(response): def clear_log_context(exc): request_context.set({}) -# Initialize managers — paths configurable via env for testing -_DATA_DIR = os.environ.get('DATA_DIR', '/app/data') -_CONFIG_DIR = os.environ.get('CONFIG_DIR', '/app/config') - -network_manager = NetworkManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -wireguard_manager = WireGuardManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -peer_registry = PeerRegistry(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -email_manager = EmailManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -calendar_manager = CalendarManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -file_manager = FileManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -routing_manager = RoutingManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -app.vault_manager = VaultManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -container_manager = ContainerManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -cell_link_manager = CellLinkManager( - data_dir=_DATA_DIR, config_dir=_CONFIG_DIR, - wireguard_manager=wireguard_manager, network_manager=network_manager, -) -auth_manager = AuthManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) +# Wire vault_manager into Flask app context (vault routes use current_app.vault_manager) +app.vault_manager = vault_manager auth_routes.auth_manager = auth_manager # Apply firewall + DNS rules from stored peer settings (survives API restarts) @@ -313,10 +261,12 @@ def _configured_domain() -> str: def _apply_startup_enforcement(): try: peers = peer_registry.list_peers() + cell_links = cell_link_manager.list_connections() firewall_manager.apply_all_peer_rules(peers) + firewall_manager.apply_all_cell_rules(cell_links) firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain(), - cell_links=cell_link_manager.list_connections()) - logger.info(f"Applied enforcement rules for {len(peers)} peers on startup") + 
cell_links=cell_links) + logger.info(f"Applied enforcement rules for {len(peers)} peers, {len(cell_links)} cells on startup") except Exception as e: logger.warning(f"Startup enforcement failed (non-fatal): {e}") @@ -332,7 +282,33 @@ def _bootstrap_dns(): COREFILE_PATH = '/app/config/dns/Corefile' + +def _recover_pending_apply(): + """If the previous all-services apply was interrupted (helper died mid-run), + clear the 'applying' flag so the UI shows the changes as still pending (not stuck + in an 'applying' spinner) and the user can retry.""" + try: + pending = config_manager.configs.get('_pending_restart', {}) + if pending.get('applying') and pending.get('needs_restart'): + config_manager.configs['_pending_restart']['applying'] = False + config_manager._save_all_configs() + logger.warning("Previous config apply did not complete — pending changes restored for retry") + except Exception as e: + logger.warning(f"Pending apply recovery check failed: {e}") + + +_recover_pending_apply() + + +def _sync_wg_keys(): + try: + wireguard_manager._sync_keys_from_conf() + except Exception as e: + logger.warning(f"WireGuard key sync failed (non-fatal): {e}") + + # Run in background so startup isn't blocked waiting on docker exec +threading.Thread(target=_sync_wg_keys, daemon=True).start() threading.Thread(target=_apply_startup_enforcement, daemon=True).start() threading.Thread(target=_bootstrap_dns, daemon=True).start() @@ -349,6 +325,40 @@ service_bus.register_service('container', container_manager) # Register auth blueprint app.register_blueprint(auth_routes.auth_bp) +# Register service blueprints (routes extracted from this file) +from routes.email import bp as _email_bp +from routes.calendar import bp as _calendar_bp +from routes.files import bp as _files_bp +from routes.network import bp as _network_bp +from routes.wireguard import bp as _wireguard_bp +from routes.cells import bp as _cells_bp +from routes.peers import bp as _peers_bp +from routes.routing import bp as 
_routing_bp +from routes.vault import bp as _vault_bp +from routes.containers import bp as _containers_bp +from routes.services import bp as _services_bp +from routes.peer_dashboard import bp as _peer_dashboard_bp +from routes.config import bp as _config_bp +app.register_blueprint(_email_bp) +app.register_blueprint(_calendar_bp) +app.register_blueprint(_files_bp) +app.register_blueprint(_network_bp) +app.register_blueprint(_wireguard_bp) +app.register_blueprint(_cells_bp) +app.register_blueprint(_peers_bp) +app.register_blueprint(_routing_bp) +app.register_blueprint(_vault_bp) +app.register_blueprint(_containers_bp) +app.register_blueprint(_services_bp) +app.register_blueprint(_peer_dashboard_bp) +app.register_blueprint(_config_bp) + +# Re-export config helpers so existing test imports/patches keep working +from routes.config import ( + _set_pending_restart, _clear_pending_restart, + _collect_service_ports, _dedup_changes, +) + # Unified health monitoring HEALTH_HISTORY_SIZE = 100 health_history = deque(maxlen=HEALTH_HISTORY_SIZE) @@ -562,2427 +572,6 @@ def get_cell_status(): logger.error(f"Error getting cell status: {e}") return jsonify({"error": str(e)}), 500 -@app.route('/api/config', methods=['GET']) -def get_config(): - """Get cell configuration.""" - try: - service_configs = config_manager.get_all_configs() - identity = service_configs.pop('_identity', {}) - config = { - 'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')), - 'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')), - 'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')), - 'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))), - } - # Expose computed per-service IPs so the frontend doesn't need to derive them - import ip_utils as _ip_utils_cfg - _ips = _ip_utils_cfg.get_service_ips(config['ip_range']) - config['service_ips'] = { - 'dns': _ips['dns'], - 'vip_mail': _ips['vip_mail'], - 
'vip_calendar': _ips['vip_calendar'], - 'vip_files': _ips['vip_files'], - 'vip_webdav': _ips['vip_webdav'], - } - config['service_configs'] = service_configs - return jsonify(config) - except Exception as e: - logger.error(f"Error getting config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config', methods=['PUT']) -def update_config(): - """Update cell configuration.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - - # Handle identity fields (cell_name, domain, ip_range, wireguard_port) - identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'} - identity_updates = {k: v for k, v in data.items() if k in identity_keys} - - # Validate cell_name and domain — block injection characters while - # allowing the full range of valid hostname/domain characters. - import re as _re_cfg - # cell_name: hostname component — letters, digits, hyphens only (no dots) - _CELL_NAME_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$') - # domain: may include dots for multi-label names (e.g. 
home.lan) - _DOMAIN_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$') - - if 'cell_name' in identity_updates: - v = str(identity_updates['cell_name']) - if not v: - return jsonify({'error': 'cell_name cannot be empty'}), 400 - if len(v) > 255: - return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400 - if not _CELL_NAME_RE.match(v): - return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400 - - if 'domain' in identity_updates: - v = str(identity_updates['domain']) - if not v: - return jsonify({'error': 'domain cannot be empty'}), 400 - if len(v) > 255: - return jsonify({'error': 'domain must be 255 characters or fewer'}), 400 - if not _DOMAIN_RE.match(v): - return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400 - - # Validate ip_range — must be a valid CIDR within an RFC-1918 range - if 'ip_range' in identity_updates: - import ipaddress as _ipa - _rfc1918 = [ - _ipa.ip_network('10.0.0.0/8'), - _ipa.ip_network('172.16.0.0/12'), - _ipa.ip_network('192.168.0.0/16'), - ] - try: - _raw = str(identity_updates['ip_range']) - if '/' not in _raw: - return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 
172.20.0.0/16)'}), 400 - _net = _ipa.ip_network(_raw, strict=False) - if not any(_net.subnet_of(r) for r in _rfc1918): - return jsonify({'error': ( - 'ip_range must be within an RFC-1918 private range ' - '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)' - )}), 400 - except ValueError as _e: - return jsonify({'error': f'Invalid ip_range: {_e}'}), 400 - - # Validate service config port and IP fields - _port_fields = { - 'network': ['dns_port'], - 'wireguard': ['port'], - 'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'], - 'calendar': ['port'], - 'files': ['port', 'manager_port'], - } - for _svc, _fields in _port_fields.items(): - if _svc not in data: - continue - _svc_data = data[_svc] - if not isinstance(_svc_data, dict): - continue - for _f in _fields: - if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '': - try: - _p = int(_svc_data[_f]) - if not (1 <= _p <= 65535): - raise ValueError() - except (ValueError, TypeError): - return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400 - # Validate that no two service sections use the same port number - _conflicts = detect_conflicts(config_manager.configs, data) - if _conflicts: - _msgs = [] - for _c in _conflicts: - _pairs = ', '.join(f"{_s}.{_f}" for _s, _f in _c['conflicts']) - _msgs.append(f"port {_c['port']} is used by {_pairs}") - return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409 - # Validate WireGuard address (must be valid IP/CIDR) - if 'wireguard' in data and isinstance(data['wireguard'], dict): - _addr = data['wireguard'].get('address') - if _addr: - import ipaddress as _ipa2 - if '/' not in str(_addr): - return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 
10.0.0.1/24)'}), 400 - try: - _ipa2.ip_interface(_addr) - except ValueError as _e: - return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400 - - # Capture old identity and service configs BEFORE saving, for change detection + revert - import copy as _copy - old_identity = dict(config_manager.configs.get('_identity', {})) - old_svc_configs = { - svc: dict(config_manager.configs.get(svc, {})) - for svc in data if svc in config_manager.service_schemas - } - # Full pre-change snapshot — used by Discard to revert to original state. - # Must be captured here, before any config writes, so it holds the true old values. - _pre_change_snapshot = {k: _copy.deepcopy(v) for k, v in config_manager.configs.items() - if not k.startswith('_')} - _pre_change_snapshot['_identity'] = _copy.deepcopy(config_manager.configs.get('_identity', {})) - if identity_updates: - stored = config_manager.configs.get('_identity', {}) - stored.update(identity_updates) - config_manager.configs['_identity'] = stored - config_manager._save_all_configs() - - # Map service names to their manager instances - _svc_managers = { - 'network': network_manager, - 'wireguard': wireguard_manager, - 'email': email_manager, - 'calendar': calendar_manager, - 'files': file_manager, - 'routing': routing_manager, - 'vault': app.vault_manager, - } - - all_restarted = [] - all_warnings = [] - - # Update service configurations: persist + apply to real config files - for service, config in data.items(): - if service in config_manager.service_schemas: - config_manager.update_service_config(service, config) - mgr = _svc_managers.get(service) - if mgr: - mgr.update_config(config) - result = mgr.apply_config(config) - all_restarted.extend(result.get('restarted', [])) - all_warnings.extend(result.get('warnings', [])) - service_bus.publish_event(EventType.CONFIG_CHANGED, service, { - 'service': service, - 'config': config - }) - # VPN port or subnet change → all peer client configs are stale - if service 
== 'wireguard' and ('port' in config or 'address' in config): - for p in peer_registry.list_peers(): - peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True}) - n = len(peer_registry.list_peers()) - if n: - all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config') - # Keep identity.wireguard_port in sync with service config port - if 'port' in config: - _id = config_manager.configs.get('_identity', {}) - _id['wireguard_port'] = config['port'] - config_manager.configs['_identity'] = _id - config_manager._save_all_configs() - - # Apply cell identity domain to network and email services (write files, defer reload) - if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''): - domain = identity_updates['domain'] - net_result = network_manager.apply_domain(domain, reload=False) - all_warnings.extend(net_result.get('warnings', [])) - # Regenerate Caddyfile — virtual host names change with the domain - import ip_utils as _ip_domain - _cur_id = config_manager.configs.get('_identity', {}) - _cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) - _cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) - _ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config-caddy/Caddyfile') - _set_pending_restart( - [f'domain changed to {domain}'], - ['dns', 'caddy'], - pre_change_snapshot=_pre_change_snapshot, - ) - - # Apply cell name change to DNS hostname record (write files, defer reload) - if identity_updates.get('cell_name'): - old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) - new_name = identity_updates['cell_name'] - if old_name != new_name: - cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False) - all_warnings.extend(cn_result.get('warnings', [])) - # Regenerate Caddyfile — main virtual host name changes with cell_name - import ip_utils as _ip_name - _cur_id2 = 
config_manager.configs.get('_identity', {}) - _cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) - _cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) - _ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config-caddy/Caddyfile') - _set_pending_restart( - [f'cell_name changed to {new_name}'], - ['dns'], - pre_change_snapshot=_pre_change_snapshot, - ) - - # Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules - if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''): - import ip_utils - new_range = identity_updates['ip_range'] - cur_identity = config_manager.configs.get('_identity', {}) - cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) - cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) - # Update DNS zone records immediately - ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain) - all_restarted.extend(ip_result.get('restarted', [])) - all_warnings.extend(ip_result.get('warnings', [])) - # Update firewall virtual IPs (iptables) and Caddy virtual IPs immediately - firewall_manager.update_service_ips(new_range) - firewall_manager.ensure_caddy_virtual_ips() - # Write new .env with updated IPs (and current ports) for next container start - env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose') - ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs)) - # Regenerate Caddyfile with new VIPs - ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, - '/app/config-caddy/Caddyfile') - # Mark ALL containers as needing restart; network_recreate signals that - # docker compose down is required before up (Docker can't change subnet in-place) - _set_pending_restart( - [f'ip_range changed to {new_range} — network will be recreated'], - ['*'], 
network_recreate=True, - pre_change_snapshot=_pre_change_snapshot, - ) - - # Detect port changes across service configs and identity - # Maps (service_key, field_name) → (port_env_key, [containers]) - _PORT_CHANGE_MAP = { - ('network', 'dns_port'): ('dns_port', ['dns']), - ('wireguard','port'): ('wg_port', ['wireguard']), - ('email', 'smtp_port'): ('mail_smtp_port', ['mail']), - ('email', 'submission_port'): ('mail_submission_port', ['mail']), - ('email', 'imap_port'): ('mail_imap_port', ['mail']), - ('email', 'webmail_port'): ('rainloop_port', ['rainloop']), - ('calendar', 'port'): ('radicale_port', ['radicale']), - ('files', 'port'): ('webdav_port', ['webdav']), - ('files', 'manager_port'): ('filegator_port', ['filegator']), - } - - port_changed_containers = set() - port_change_messages = [] - - import ip_utils as _ip_utils_pcd - for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items(): - if svc_key in data and field in data[svc_key]: - default_val = _ip_utils_pcd.PORT_DEFAULTS.get(_env_key) - old_val = old_svc_configs.get(svc_key, {}).get(field, default_val) - new_val = data[svc_key][field] - if old_val != new_val: - port_changed_containers.update(containers) - port_change_messages.append( - f'{svc_key} {field}: {old_val} → {new_val}' - ) - - # wireguard_port in identity also drives WG_PORT env var; sync to service config - if 'wireguard_port' in identity_updates: - old_wg = old_identity.get('wireguard_port', _ip_utils_pcd.PORT_DEFAULTS.get('wg_port', 51820)) - new_wg = identity_updates['wireguard_port'] - if old_wg != new_wg: - # Sync to wireguard service config and update wg0.conf - _wg_svc = config_manager.configs.get('wireguard', {}) - _wg_svc['port'] = new_wg - config_manager.update_service_config('wireguard', _wg_svc) - wireguard_manager.apply_config({'port': new_wg}) - port_changed_containers.add('wireguard') - port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}') - - if port_changed_containers: - import ip_utils as 
_ip_utils_ports - _env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose') - _ip_range = config_manager.configs.get('_identity', {}).get( - 'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16') - ) - _ip_utils_ports.write_env_file( - _ip_range, _env_file, _collect_service_ports(config_manager.configs) - ) - _set_pending_restart(port_change_messages, list(port_changed_containers), - pre_change_snapshot=_pre_change_snapshot) - - logger.info(f"Updated config, restarted: {all_restarted}") - return jsonify({ - "message": "Configuration updated and applied", - "restarted": all_restarted, - "warnings": all_warnings, - }) - except Exception as e: - logger.error(f"Error updating config: {e}") - return jsonify({"error": str(e)}), 500 - - -# --------------------------------------------------------------------------- -# Pending-restart helpers -# --------------------------------------------------------------------------- - -def _collect_service_ports(configs: dict) -> dict: - """Extract current port values from service configs for .env generation.""" - ports = {} - net = configs.get('network', {}) - wg = configs.get('wireguard', {}) - email = configs.get('email', {}) - cal = configs.get('calendar', {}) - files = configs.get('files', {}) - identity = configs.get('_identity', {}) - - if 'dns_port' in net: ports['dns_port'] = net['dns_port'] - if 'port' in wg: ports['wg_port'] = wg['port'] - elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port'] - if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port'] - if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port'] - if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port'] - if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port'] - if 'port' in cal: ports['radicale_port'] = cal['port'] - if 'port' in files: ports['webdav_port'] = files['port'] - if 'manager_port' in files: ports['filegator_port'] = 
files['manager_port'] - return ports - - -def _dedup_changes(existing: list, new: list) -> list: - """Merge change lists, keeping only the latest entry per config key.""" - def key_of(msg: str) -> str: - # "ip_range changed to X" → "ip_range" - if ' changed' in msg: - return msg.split(' changed')[0].strip() - # "network dns_port: 52 → 53" → "network dns_port" - if ':' in msg: - return msg.split(':')[0].strip() - return msg - merged = {key_of(c): c for c in existing} - merged.update({key_of(c): c for c in new}) - return list(merged.values()) - - -def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False, - pre_change_snapshot: dict = None): - """Record that specific containers need to be restarted to apply configuration. - - containers: list of docker-compose service names, or None/'*' to restart all. - network_recreate: True when the Docker bridge subnet changed (requires down+up). - pre_change_snapshot: full config captured BEFORE this save (for Discard to revert). - Merges with any existing pending state so multiple changes accumulate. - """ - from datetime import datetime as _dt - existing = config_manager.configs.get('_pending_restart', {}) - existing_changes = existing.get('changes', []) if existing.get('needs_restart') else [] - existing_containers = existing.get('containers', []) if existing.get('needs_restart') else [] - - # Keep the oldest snapshot (the true pre-change state). Never overwrite it with a - # later snapshot — subsequent changes while pending should still revert to origin. 
- if not existing.get('needs_restart'): - snapshot = pre_change_snapshot or {} - else: - snapshot = existing.get('_snapshot', {}) - - if containers is None or '*' in (containers or []) or existing_containers == ['*']: - new_containers = ['*'] - else: - new_containers = list(set(existing_containers) | set(containers)) - - config_manager.configs['_pending_restart'] = { - 'needs_restart': True, - 'changed_at': _dt.utcnow().isoformat(), - 'changes': _dedup_changes(existing_changes, changes), - 'containers': new_containers, - 'network_recreate': network_recreate or existing.get('network_recreate', False), - '_snapshot': snapshot, - } - config_manager._save_all_configs() - - -def _clear_pending_restart(): - config_manager.configs['_pending_restart'] = { - 'needs_restart': False, 'changes': [], 'containers': [], 'network_recreate': False - } - config_manager._save_all_configs() - - -@app.route('/api/config/pending', methods=['GET']) -def get_pending_config(): - """Return whether there are unapplied configuration changes that require a restart.""" - pending = config_manager.configs.get('_pending_restart', {}) - return jsonify({ - 'needs_restart': pending.get('needs_restart', False), - 'changed_at': pending.get('changed_at'), - 'changes': pending.get('changes', []), - 'containers': pending.get('containers', ['*']), - }) - - -@app.route('/api/config/pending', methods=['DELETE']) -def cancel_pending_config(): - """Discard pending configuration changes and restore config to pre-change snapshot.""" - pending = config_manager.configs.get('_pending_restart', {}) - snapshot = pending.get('_snapshot', {}) - if snapshot: - # Capture current (changed) identity before reverting, to rewrite config files - cur_identity = dict(config_manager.configs.get('_identity', {})) - old_identity = snapshot.get('_identity', {}) - - # Restore config values from snapshot - for k, v in snapshot.items(): - config_manager.configs[k] = v - - # Rewrite DNS/Caddy config files back to old values so they 
match restored config - import ip_utils as _ip_revert - _id = config_manager.configs.get('_identity', {}) - _range = _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) - _cell = _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) - _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) - - cur_domain = cur_identity.get('domain', '') - old_domain = old_identity.get('domain', '') - if cur_domain and old_domain and cur_domain != old_domain: - network_manager.apply_domain(old_domain, reload=False) - - cur_cell_name = cur_identity.get('cell_name', '') - old_cell_name = old_identity.get('cell_name', '') - if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name: - network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False) - - _ip_revert.write_caddyfile(_range, _cell, _dom, '/app/config-caddy/Caddyfile') - - _clear_pending_restart() - return jsonify({'message': 'Pending changes discarded'}) - - -@app.route('/api/config/apply', methods=['POST']) -def apply_pending_config(): - """Apply pending configuration by restarting containers via docker compose up -d.""" - try: - pending = config_manager.configs.get('_pending_restart', {}) - if not pending.get('needs_restart'): - return jsonify({'message': 'No pending changes to apply'}) - - # Get project working dir and image name from our own container labels - project_dir = '/home/roof/pic' - api_image = 'pic_api:latest' # fallback (docker-compose v1 naming) - try: - import docker as _docker_sdk - _client = _docker_sdk.from_env() - _self = _client.containers.get('cell-api') - project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir) - # Use the actual image tag so the helper works regardless of compose version - # (docker-compose v1 builds pic_api:latest, compose v2+ builds pic-api:latest) - tags = _self.image.tags - if tags: - api_image = tags[0] - except Exception: - pass - - containers = pending.get('containers', ['*']) - - # Check if 
the IP range (network subnet) is changing — Docker cannot modify an - # existing network's subnet in-place, so we need `down` + `up` in that case. - needs_network_recreate = pending.get('network_recreate', False) - - host_env = os.path.join(project_dir, '.env') - host_compose = os.path.join(project_dir, 'docker-compose.yml') - - if '*' in containers: - # All-services restart: `docker compose down` or `up -d` may stop/recreate the - # API container itself, killing this background thread mid-operation. - # Spawn an independent helper container (same image as cell-api) that has docker - # CLI and survives cell-api being stopped/recreated. - # Clear pending flag now — the helper runs fire-and-forget and we cannot track - # its exit code from within the API process (it may restart us). - _clear_pending_restart() - if needs_network_recreate: - helper_script = ( - f'sleep 2' - f' && docker compose --project-directory {project_dir}' - f' -f {host_compose} --env-file {host_env} down' - f' && docker compose --project-directory {project_dir}' - f' -f {host_compose} --env-file {host_env} up -d' - ) - else: - helper_script = ( - f'sleep 2' - f' && docker compose --project-directory {project_dir}' - f' -f {host_compose} --env-file {host_env} up -d' - ) - - def _do_apply(): - import subprocess as _subprocess - _subprocess.Popen( - ['docker', 'run', '--rm', - '-v', '/var/run/docker.sock:/var/run/docker.sock', - '-v', f'{project_dir}:{project_dir}', - '--entrypoint', 'sh', - api_image, - '-c', helper_script], - close_fds=True, - stdout=_subprocess.DEVNULL, - stderr=_subprocess.DEVNULL, - ) - logger.info( - 'spawned helper container for all-services restart' - + (' (network_recreate)' if needs_network_recreate else '') - ) - else: - # Specific containers only — API is not affected, run directly from here. - # Only clear the pending flag after the subprocess exits with code 0 so that - # if the compose command fails the UI still shows changes as pending. 
- def _do_apply(): - import time as _time - import subprocess as _subprocess - _time.sleep(0.3) - result = _subprocess.run( - ['docker', 'compose', - '--project-directory', project_dir, - '-f', '/app/docker-compose.yml', - '--env-file', '/app/.env.compose', - 'up', '-d', '--no-deps', '--force-recreate'] + containers, - capture_output=True, text=True, timeout=120, - ) - if result.returncode != 0: - logger.error(f"docker compose up failed: {result.stderr.strip()}") - else: - logger.info(f'docker compose up completed for: {containers}') - _clear_pending_restart() - - threading.Thread(target=_do_apply, daemon=False).start() - - return jsonify({ - 'message': 'Applying configuration — containers are restarting', - 'restart_in_progress': True, - }) - except Exception as e: - logger.error(f"Error applying config: {e}") - return jsonify({'error': str(e)}), 500 - - -# Configuration management endpoints -@app.route('/api/config/backup', methods=['POST']) -def create_config_backup(): - """Create configuration backup.""" - try: - backup_id = config_manager.backup_config() - service_bus.publish_event(EventType.BACKUP_CREATED, 'api', { - 'backup_id': backup_id, - 'timestamp': datetime.utcnow().isoformat() - }) - return jsonify({"backup_id": backup_id}) - except Exception as e: - logger.error(f"Error creating backup: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config/backups', methods=['GET']) -def list_config_backups(): - """List available backups.""" - try: - backups = config_manager.list_backups() - return jsonify(backups) - except Exception as e: - logger.error(f"Error listing backups: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config/restore/', methods=['POST']) -def restore_config(backup_id): - """Restore configuration from backup. 
Body may contain {services: [...]} for selective restore.""" - try: - data = request.get_json(silent=True) or {} - services = data.get('services') # None = full restore - success = config_manager.restore_config(backup_id, services=services) - if success: - service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', { - 'backup_id': backup_id, - 'timestamp': datetime.utcnow().isoformat() - }) - return jsonify({"message": f"Configuration restored from backup: {backup_id}"}) - else: - return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500 - except Exception as e: - logger.error(f"Error restoring backup: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config/export', methods=['GET']) -def export_config(): - """Export configuration.""" - try: - format = request.args.get('format', 'json') - config_data = config_manager.export_config(format) - return jsonify({"config": config_data, "format": format}) - except Exception as e: - logger.error(f"Error exporting config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config/import', methods=['POST']) -def import_config(): - """Import configuration.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - - config_data = data.get('config') - format = data.get('format', 'json') - - success = config_manager.import_config(config_data, format) - if success: - return jsonify({"message": "Configuration imported successfully"}) - else: - return jsonify({"error": "Failed to import configuration"}), 500 - except Exception as e: - logger.error(f"Error importing config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/config/backups//download', methods=['GET']) -def download_backup(backup_id): - """Download a backup as a zip file.""" - try: - from pathlib import Path - backup_path = config_manager.backup_dir / backup_id - if not backup_path.exists(): - return jsonify({'error': f'Backup {backup_id} 
not found'}), 404 - buf = io.BytesIO() - with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf: - for f in backup_path.rglob('*'): - if f.is_file(): - zf.write(f, f.relative_to(backup_path)) - buf.seek(0) - return send_file(buf, mimetype='application/zip', - as_attachment=True, - download_name=f'{backup_id}.zip') - except Exception as e: - logger.error(f"Error downloading backup: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/config/backup/upload', methods=['POST']) -def upload_backup(): - """Upload a backup zip file.""" - try: - if 'file' not in request.files: - return jsonify({'error': 'No file provided'}), 400 - f = request.files['file'] - filename = f.filename or '' - if filename.endswith('.zip'): - backup_id = filename[:-4] - else: - backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}" - backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_') - backup_path = config_manager.backup_dir / backup_id - backup_path.mkdir(parents=True, exist_ok=True) - try: - with zipfile.ZipFile(io.BytesIO(f.read())) as zf: - zf.extractall(backup_path) - except zipfile.BadZipFile: - shutil.rmtree(backup_path, ignore_errors=True) - return jsonify({'error': 'Invalid zip file'}), 400 - if not (backup_path / 'manifest.json').exists(): - shutil.rmtree(backup_path, ignore_errors=True) - return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400 - return jsonify({'backup_id': backup_id}) - except Exception as e: - logger.error(f"Error uploading backup: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/config/backups/', methods=['DELETE']) -def delete_config_backup(backup_id): - """Delete a configuration backup.""" - try: - success = config_manager.delete_backup(backup_id) - if success: - return jsonify({"message": f"Backup {backup_id} deleted"}) - else: - return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500 - except Exception as e: - logger.error(f"Error deleting backup: {e}") - return 
jsonify({"error": str(e)}), 500 - -# Service bus endpoints -@app.route('/api/services/bus/status', methods=['GET']) -def get_service_bus_status(): - """Get service bus status.""" - try: - return jsonify(service_bus.get_service_status_summary()) - except Exception as e: - logger.error(f"Error getting service bus status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/services/bus/events', methods=['GET']) -def get_service_bus_events(): - """Get service bus event history.""" - try: - event_type = request.args.get('type') - source = request.args.get('source') - limit = int(request.args.get('limit', 100)) - - events = service_bus.get_event_history( - EventType(event_type) if event_type else None, - source, - limit - ) - - # Convert events to serializable format - serializable_events = [] - for event in events: - serializable_events.append({ - 'event_id': event.event_id, - 'event_type': event.event_type.value, - 'source': event.source, - 'data': event.data, - 'timestamp': event.timestamp.isoformat() - }) - - return jsonify(serializable_events) - except Exception as e: - logger.error(f"Error getting service bus events: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/services/bus/services//start', methods=['POST']) -def start_service(service_name): - """Start a service with orchestration.""" - try: - success = service_bus.orchestrate_service_start(service_name) - if success: - return jsonify({"message": f"Service {service_name} started successfully"}) - else: - return jsonify({"error": f"Failed to start service {service_name}"}), 500 - except Exception as e: - logger.error(f"Error starting service {service_name}: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/services/bus/services//stop', methods=['POST']) -def stop_service(service_name): - """Stop a service with orchestration.""" - try: - success = service_bus.orchestrate_service_stop(service_name) - if success: - return jsonify({"message": f"Service 
{service_name} stopped successfully"}) - else: - return jsonify({"error": f"Failed to stop service {service_name}"}), 500 - except Exception as e: - logger.error(f"Error stopping service {service_name}: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/services/bus/services//restart', methods=['POST']) -def restart_service(service_name): - """Restart a service with orchestration.""" - try: - success = service_bus.orchestrate_service_restart(service_name) - if success: - return jsonify({"message": f"Service {service_name} restarted successfully"}) - else: - return jsonify({"error": f"Failed to restart service {service_name}"}), 500 - except Exception as e: - logger.error(f"Error restarting service {service_name}: {e}") - return jsonify({"error": str(e)}), 500 - -# Logging endpoints -@app.route('/api/logs/services/', methods=['GET']) -def get_service_logs(service): - """Get logs for a specific service.""" - try: - level = request.args.get('level', 'INFO') - lines = int(request.args.get('lines', 50)) - - logs = log_manager.get_service_logs(service, level, lines) - return jsonify({"service": service, "logs": logs}) - except Exception as e: - logger.error(f"Error getting logs for {service}: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/search', methods=['POST']) -def search_logs(): - """Search logs across all services.""" - try: - data = request.get_json(silent=True) or {} - query = data.get('query', '') - services = data.get('services') - level = data.get('level') - time_range = data.get('time_range') - - results = log_manager.search_logs(query, time_range, services, level) - return jsonify({"results": results, "count": len(results)}) - except Exception as e: - logger.error(f"Error searching logs: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/export', methods=['POST']) -def export_logs(): - """Export logs in specified format.""" - try: - data = request.get_json(silent=True) or {} - format = 
data.get('format', 'json') - filters = data.get('filters', {}) - - log_data = log_manager.export_logs(format, filters) - return jsonify({"logs": log_data, "format": format}) - except Exception as e: - logger.error(f"Error exporting logs: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/statistics', methods=['GET']) -def get_log_statistics(): - """Get log statistics.""" - try: - service = request.args.get('service') - stats = log_manager.get_log_statistics(service) - return jsonify(stats) - except Exception as e: - logger.error(f"Error getting log statistics: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/rotate', methods=['POST']) -def rotate_logs(): - """Manually rotate an API service log file.""" - try: - data = request.get_json(silent=True) or {} - service = data.get('service') # None = rotate all - log_manager.rotate_logs(service) - return jsonify({"message": "Logs rotated successfully"}) - except Exception as e: - logger.error(f"Error rotating logs: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/files', methods=['GET']) -def get_log_file_infos(): - """List service log files with sizes.""" - try: - return jsonify(log_manager.get_all_log_file_infos()) - except Exception as e: - logger.error(f"Error listing log files: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/verbosity', methods=['GET']) -def get_log_verbosity(): - """Return current per-service log levels.""" - try: - return jsonify(log_manager.get_service_levels()) - except Exception as e: - logger.error(f"Error getting log verbosity: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/logs/verbosity', methods=['PUT']) -def set_log_verbosity(): - """Update log levels for one or all services. 
Body: {service: level} map.""" - try: - data = request.get_json(silent=True) or {} - for service, level in data.items(): - log_manager.set_service_level(service, level) - # Persist to config so levels survive API restarts - levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json') - os.makedirs(os.path.dirname(levels_file), exist_ok=True) - current = {} - if os.path.exists(levels_file): - try: - with open(levels_file) as f: - current = json.load(f) - except Exception: - pass - current.update(data) - with open(levels_file, 'w') as f: - json.dump(current, f, indent=2) - return jsonify({"message": "Log levels updated", "levels": log_manager.get_service_levels()}) - except Exception as e: - logger.error(f"Error setting log verbosity: {e}") - return jsonify({"error": str(e)}), 500 - -# Network Services API -@app.route('/api/dns/records', methods=['GET']) -def get_dns_records(): - """Get DNS records.""" - try: - records = network_manager.get_dns_records() - return jsonify(records) - except Exception as e: - logger.error(f"Error getting DNS records: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dns/records', methods=['POST']) -def add_dns_record(): - """Add DNS record.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = network_manager.add_dns_record(**data) - return jsonify(result) - except Exception as e: - logger.error(f"Error adding DNS record: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dns/records', methods=['DELETE']) -def remove_dns_record(): - """Remove DNS record.""" - try: - data = request.get_json(silent=True) - result = network_manager.remove_dns_record(**data) - return jsonify(result) - except Exception as e: - logger.error(f"Error removing DNS record: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dhcp/leases', methods=['GET']) -def get_dhcp_leases(): - """Get DHCP leases.""" - try: - leases 
= network_manager.get_dhcp_leases() - return jsonify(leases) - except Exception as e: - logger.error(f"Error getting DHCP leases: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dhcp/reservations', methods=['POST']) -def add_dhcp_reservation(): - try: - data = request.get_json(silent=True) - if not data: - return jsonify({"error": "No data provided"}), 400 - for field in ('mac', 'ip'): - if field not in data: - return jsonify({"error": f"Missing required field: {field}"}), 400 - result = network_manager.add_dhcp_reservation(data['mac'], data['ip'], data.get('hostname', '')) - return jsonify({"success": result}) - except Exception as e: - logger.error(f"Error adding DHCP reservation: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dhcp/reservations', methods=['DELETE']) -def remove_dhcp_reservation(): - """Remove DHCP reservation.""" - try: - data = request.get_json(silent=True) - if not data or 'mac' not in data: - return jsonify({"error": "Missing required field: mac"}), 400 - result = network_manager.remove_dhcp_reservation(data['mac']) - return jsonify({"success": result}) - except Exception as e: - logger.error(f"Error removing DHCP reservation: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/ntp/status', methods=['GET']) -def get_ntp_status(): - """Get NTP status.""" - try: - status = network_manager.get_ntp_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting NTP status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/network/info', methods=['GET']) -def get_network_info(): - """Get general network info (interfaces, gateway, DNS, etc.)""" - try: - info = network_manager.get_network_info() - return jsonify(info) - except Exception as e: - logger.error(f"Error getting network info: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/dns/status', methods=['GET']) -def get_dns_status(): - """Get DNS service status and summary info.""" - 
try: - status = network_manager.get_dns_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting DNS status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/network/test', methods=['POST']) -def test_network(): - try: - result = network_manager.test_connectivity() - return jsonify(result) - except Exception as e: - logger.error(f"Error testing network: {e}") - return jsonify({"error": str(e)}), 500 - -# WireGuard API -@app.route('/api/wireguard/keys', methods=['GET']) -def get_wireguard_keys(): - """Get WireGuard keys (public key only; private key never leaves the server).""" - try: - keys = wireguard_manager.get_keys() - return jsonify({ - 'public_key': keys.get('public_key', ''), - 'has_private_key': bool(keys.get('private_key')), - }) - except Exception as e: - logger.error(f"Error getting WireGuard keys: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/keys/peer', methods=['POST']) -def generate_peer_keys(): - """Generate peer keys.""" - try: - data = request.get_json(silent=True) or {} - name = data.get('name') or data.get('peer_name') - if not name: - return jsonify({"error": "Missing peer name"}), 400 - result = wireguard_manager.generate_peer_keys(name) - return jsonify(result) - except Exception as e: - logger.error(f"Error generating peer keys: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/config', methods=['GET']) -def get_wireguard_config(): - """Get WireGuard configuration.""" - try: - result = wireguard_manager.get_config() - return jsonify(result) - except Exception as e: - logger.error(f"Error getting WireGuard config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers', methods=['GET']) -def get_wireguard_peers(): - """Get WireGuard peers.""" - try: - peers = wireguard_manager.get_peers() - return jsonify(peers) - except Exception as e: - logger.error(f"Error getting WireGuard peers: {e}") - return 
jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers', methods=['POST']) -def add_wireguard_peer(): - """Add WireGuard peer.""" - try: - data = request.get_json(silent=True) or {} - result = wireguard_manager.add_peer( - name=data.get('name', ''), - public_key=data.get('public_key', ''), - endpoint_ip=data.get('endpoint', data.get('endpoint_ip', '')), - allowed_ips=data.get('allowed_ips', ''), - persistent_keepalive=data.get('persistent_keepalive', 25) - ) - return jsonify({"success": result}) - except Exception as e: - logger.error(f"Error adding WireGuard peer: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers', methods=['DELETE']) -def remove_wireguard_peer(): - """Remove WireGuard peer.""" - try: - data = request.get_json(silent=True) or {} - public_key = data.get('public_key') or data.get('name', '') - result = wireguard_manager.remove_peer(public_key) - return jsonify({"success": result}) - except Exception as e: - logger.error(f"Error removing WireGuard peer: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/status', methods=['GET']) -def get_wireguard_status(): - """Get WireGuard status.""" - try: - status = wireguard_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting WireGuard status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/connectivity', methods=['POST']) -def test_wireguard_connectivity(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = wireguard_manager.test_connectivity(data) - return jsonify(result) - except Exception as e: - logger.error(f"Error testing WireGuard connectivity: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers/ip', methods=['PUT']) -def update_peer_ip(): - """Update peer IP.""" - try: - data = request.get_json(silent=True) or {} - result = 
wireguard_manager.update_peer_ip( - data.get('public_key', data.get('peer', '')), - data.get('ip', '') - ) - return jsonify({"success": result}) - except Exception as e: - logger.error(f"Error updating peer IP: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers/status', methods=['POST']) -def get_peer_status(): - """Get live WireGuard status for a single peer.""" - try: - data = request.get_json(silent=True) or {} - public_key = data.get('public_key', '') - if not public_key: - return jsonify({"error": "Missing public_key"}), 400 - status = wireguard_manager.get_peer_status(public_key) - return jsonify(status) - except Exception as e: - logger.error(f"Error getting peer status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/peers/statuses', methods=['GET']) -def get_all_peer_statuses(): - """Get live WireGuard status for all peers (keyed by public_key).""" - try: - statuses = wireguard_manager.get_all_peer_statuses() - return jsonify(statuses) - except Exception as e: - logger.error(f"Error getting peer statuses: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/network/setup', methods=['POST']) -def setup_network(): - """Setup network configuration for internet access.""" - try: - success = wireguard_manager.setup_network_configuration() - if success: - return jsonify({"message": "Network configuration setup completed successfully"}) - else: - return jsonify({"error": "Failed to setup network configuration"}), 500 - except Exception as e: - logger.error(f"Error setting up network configuration: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/network/status', methods=['GET']) -def get_network_status(): - """Get network configuration status.""" - try: - status = wireguard_manager.get_network_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting network status: {e}") - return jsonify({"error": str(e)}), 500 - 
-@app.route('/api/wireguard/peers/config', methods=['POST']) -def get_peer_config(): - try: - data = request.get_json(silent=True) or {} - peer_name = data.get('name', data.get('peer', '')) - - # Look up peer details from registry if not supplied - peer_ip = data.get('ip', '') - peer_private_key = data.get('private_key', '') - registered = peer_registry.get_peer(peer_name) if peer_name else {} - if peer_name and (not peer_ip or not peer_private_key): - if registered: - peer_ip = peer_ip or registered.get('ip', '') - peer_private_key = peer_private_key or registered.get('private_key', '') - - # Use real external endpoint if not supplied - server_endpoint = data.get('server_endpoint', '') - if not server_endpoint: - srv = wireguard_manager.get_server_config() - server_endpoint = srv.get('endpoint') or '' - - # Determine AllowedIPs: explicit > peer's stored internet_access > default full tunnel - allowed_ips = data.get('allowed_ips') or None - if not allowed_ips and registered: - internet_access = registered.get('internet_access', True) - allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips() - - result = wireguard_manager.get_peer_config( - peer_name=peer_name, - peer_ip=peer_ip, - peer_private_key=peer_private_key, - server_endpoint=server_endpoint, - allowed_ips=allowed_ips, - ) - return jsonify({"config": result}) - except Exception as e: - logger.error(f"Error getting peer config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/server-config', methods=['GET']) -def get_server_config(): - try: - config = wireguard_manager.get_server_config() - return jsonify(config) - except Exception as e: - logger.error(f"Error getting server config: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/refresh-ip', methods=['GET', 'POST']) -def refresh_external_ip(): - try: - ip = wireguard_manager.get_external_ip(force_refresh=True) - port = 
wireguard_manager._get_configured_port() - return jsonify({ - 'external_ip': ip, - 'port': port, - 'endpoint': f'{ip}:{port}' if ip else None, - }) - except Exception as e: - logger.error(f"Error refreshing external IP: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/wireguard/apply-enforcement', methods=['POST']) -def apply_wireguard_enforcement(): - """Re-apply per-peer iptables and DNS enforcement rules (call after WireGuard restart).""" - try: - peers = peer_registry.list_peers() - firewall_manager.apply_all_peer_rules(peers) - firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain(), - cell_links=cell_link_manager.list_connections()) - return jsonify({'ok': True, 'peers': len(peers)}) - except Exception as e: - return jsonify({'error': str(e)}), 500 - -@app.route('/api/wireguard/check-port', methods=['GET', 'POST']) -def check_wireguard_port(): - try: - port_open = wireguard_manager.check_port_open() - return jsonify({'port_open': port_open, 'port': wireguard_manager._get_configured_port()}) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -# ── Cell-to-cell connections ───────────────────────────────────────────────── - -@app.route('/api/cells/invite', methods=['GET']) -def get_cell_invite(): - """Generate an invite package for this cell.""" - try: - identity = config_manager.configs.get('_identity', {}) - cell_name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) - domain = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) - invite = cell_link_manager.generate_invite(cell_name, domain) - return jsonify(invite) - except Exception as e: - logger.error(f"Error generating cell invite: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/cells', methods=['GET']) -def list_cell_connections(): - """List all connected cells.""" - try: - return jsonify(cell_link_manager.list_connections()) - except Exception as e: - return jsonify({'error': str(e)}), 500 - 
-@app.route('/api/cells', methods=['POST']) -def add_cell_connection(): - """Connect to a remote cell using their invite package.""" - try: - data = request.get_json(silent=True) - if not data: - return jsonify({'error': 'No data provided'}), 400 - for field in ('cell_name', 'public_key', 'vpn_subnet', 'dns_ip', 'domain'): - if field not in data: - return jsonify({'error': f'Missing field: {field}'}), 400 - link = cell_link_manager.add_connection(data) - return jsonify({'message': f"Connected to cell '{data['cell_name']}'", 'link': link}), 201 - except ValueError as e: - return jsonify({'error': str(e)}), 400 - except Exception as e: - logger.error(f"Error adding cell connection: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/cells/', methods=['DELETE']) -def remove_cell_connection(cell_name): - """Disconnect from a remote cell.""" - try: - cell_link_manager.remove_connection(cell_name) - return jsonify({'message': f"Cell '{cell_name}' disconnected"}) - except ValueError as e: - return jsonify({'error': str(e)}), 404 - except Exception as e: - logger.error(f"Error removing cell connection: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/cells//status', methods=['GET']) -def get_cell_connection_status(cell_name): - """Get live status for a connected cell.""" - try: - status = cell_link_manager.get_connection_status(cell_name) - return jsonify(status) - except ValueError as e: - return jsonify({'error': str(e)}), 404 - except Exception as e: - return jsonify({'error': str(e)}), 500 - -# Peer Registry API -@app.route('/api/peers', methods=['GET']) -def get_peers(): - """Get all peers.""" - try: - peers = peer_registry.list_peers() - return jsonify(peers) - except Exception as e: - logger.error(f"Error getting peers: {e}") - return jsonify({"error": str(e)}), 500 - -def _next_peer_ip() -> str: - """Auto-assign the next free host address from the configured VPN subnet.""" - import ipaddress - server_addr = 
wireguard_manager._get_configured_address() # e.g. '10.0.0.1/24' - network = ipaddress.ip_network(server_addr, strict=False) - server_ip = str(ipaddress.ip_interface(server_addr).ip) - used = {p.get('ip', '').split('/')[0] for p in peer_registry.list_peers()} - for host in network.hosts(): - ip = str(host) - if ip == server_ip: - continue - if ip not in used: - return ip - raise ValueError(f'No free IPs left in {network}') - - -@app.route('/api/peers', methods=['POST']) -def add_peer(): - """Add a peer and auto-provision auth/email/calendar/files accounts.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - - # Validate required fields (ip is optional — auto-assigned if omitted) - required_fields = ['name', 'public_key'] - for field in required_fields: - if field not in data: - return jsonify({"error": f"Missing required field: {field}"}), 400 - - # Password is required for peer provisioning - password = data.get('password') or '' - if not password: - return jsonify({"error": "Missing required field: password"}), 400 - if len(password) < 10: - return jsonify({"error": "password must be at least 10 characters"}), 400 - - try: - assigned_ip = data.get('ip') or _next_peer_ip() - except ValueError as e: - return jsonify({'error': str(e)}), 409 - - # Validate service_access if provided - _valid_services = {'calendar', 'files', 'mail', 'webdav'} - service_access = data.get('service_access', list(_valid_services)) - if not isinstance(service_access, list) or not all(s in _valid_services for s in service_access): - return jsonify({"error": f"service_access must be a list of: {sorted(_valid_services)}"}), 400 - - peer_name = data['name'] - - # --- Provision auth account (hard-required) --- - if not auth_manager.create_user(peer_name, password, 'peer'): - return jsonify({"error": f"Could not create auth account (duplicate name?)"}), 400 - - # --- Provision service accounts (best-effort; failures logged 
but non-fatal) --- - provisioned = ['auth'] - domain = _configured_domain() - for step_name, step_fn in [ - ('email', lambda: email_manager.create_email_user(peer_name, domain, password)), - ('calendar', lambda: calendar_manager.create_calendar_user(peer_name, password)), - ('files', lambda: file_manager.create_user(peer_name, password)), - ]: - try: - if step_fn(): - provisioned.append(step_name) - else: - logger.warning(f"Peer {peer_name}: {step_name} account creation returned False (service may not be ready)") - except Exception as e: - logger.warning(f"Peer {peer_name}: {step_name} account creation failed (non-fatal): {e}") - - # Add peer to registry with all provided fields - peer_info = { - 'peer': peer_name, - 'ip': assigned_ip, - 'public_key': data['public_key'], - 'private_key': data.get('private_key'), - 'server_public_key': data.get('server_public_key'), - 'server_endpoint': data.get('server_endpoint'), - 'allowed_ips': data.get('allowed_ips'), - 'persistent_keepalive': data.get('persistent_keepalive'), - 'description': data.get('description'), - 'internet_access': data.get('internet_access', True), - 'service_access': service_access, - 'peer_access': data.get('peer_access', True), - 'config_needs_reinstall': False, - } - - peer_added_to_registry = False - try: - # Step 1: Add to registry - success = peer_registry.add_peer(peer_info) - if not success: - # Registry rejected (already exists) — rollback provisioned accounts - for svc in ('files', 'calendar', 'email', 'auth'): - try: - if svc == 'files': - file_manager.delete_user(peer_name) - elif svc == 'calendar': - calendar_manager.delete_calendar_user(peer_name) - elif svc == 'email': - email_manager.delete_email_user(peer_name, _configured_domain()) - elif svc == 'auth': - auth_manager.delete_user(peer_name) - except Exception: - pass - return jsonify({"error": f"Peer {peer_name} already exists"}), 400 - peer_added_to_registry = True - - # Step 2: Firewall rules (critical) - 
firewall_manager.apply_peer_rules(peer_info['ip'], peer_info) - - # Step 3: Add peer to WireGuard server config (non-fatal if WG is not running) - wg_allowed = f"{assigned_ip}/32" if '/' not in assigned_ip else assigned_ip - try: - wireguard_manager.add_peer(peer_name, data['public_key'], endpoint_ip='', allowed_ips=wg_allowed) - except Exception as wg_err: - logger.warning(f"Peer {peer_name}: WireGuard server config update failed (non-fatal): {wg_err}") - - # Step 4: Update DNS rules - firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), - cell_links=cell_link_manager.list_connections()) - return jsonify({"message": f"Peer {peer_name} added successfully", "ip": assigned_ip}), 201 - - except Exception as e: - # Rollback registry entry if we got past that step - if peer_added_to_registry: - try: - peer_registry.remove_peer(peer_name) - except Exception: - pass - logger.error(f"Error adding peer {peer_name}: {e}") - return jsonify({'error': str(e)}), 500 - - except Exception as e: - logger.error(f"Error adding peer: {e}") - return jsonify({"error": str(e)}), 500 - - -@app.route('/api/peers/', methods=['PUT']) -def update_peer(peer_name): - """Update peer settings. 
Marks config_needs_reinstall if VPN config changed.""" - try: - data = request.get_json(silent=True) or {} - existing = peer_registry.get_peer(peer_name) - if not existing: - return jsonify({"error": "Peer not found"}), 404 - - # Detect changes that require client to reinstall tunnel config - config_changed = ( - ('internet_access' in data and data['internet_access'] != existing.get('internet_access', True)) or - ('ip' in data and data['ip'] != existing.get('ip')) or - ('persistent_keepalive' in data and data['persistent_keepalive'] != existing.get('persistent_keepalive')) - ) - - updates = {k: v for k, v in data.items()} - if config_changed: - updates['config_needs_reinstall'] = True - - success = peer_registry.update_peer(peer_name, updates) - if success: - # Re-apply server-side enforcement with updated settings - updated_peer = peer_registry.get_peer(peer_name) - if updated_peer: - firewall_manager.apply_peer_rules(updated_peer['ip'], updated_peer) - firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), - cell_links=cell_link_manager.list_connections()) - result = {"message": f"Peer {peer_name} updated", "config_changed": config_changed} - return jsonify(result) - else: - return jsonify({"error": "Update failed"}), 500 - except Exception as e: - logger.error(f"Error updating peer {peer_name}: {e}") - return jsonify({"error": str(e)}), 500 - - -@app.route('/api/peers/<peer_name>/clear-reinstall', methods=['POST']) -def clear_peer_reinstall(peer_name): - """Clear the config_needs_reinstall flag once user has downloaded new config.""" - try: - peer_registry.clear_reinstall_flag(peer_name) - return jsonify({"message": "Reinstall flag cleared"}) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - -@app.route('/api/peers/<peer_name>', methods=['DELETE']) -def remove_peer(peer_name): - """Remove a peer and clean up firewall, DNS, and all service accounts.""" - try: - peer = peer_registry.get_peer(peer_name) - if not peer: - 
return jsonify({"message": f"Peer {peer_name} not found or already removed"}) - peer_ip = peer.get('ip') - peer_pubkey = peer.get('public_key', '') - success = peer_registry.remove_peer(peer_name) - if success: - if peer_ip: - firewall_manager.clear_peer_rules(peer_ip) - firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), - cell_links=cell_link_manager.list_connections()) - # Remove peer from WireGuard server config (non-fatal) - if peer_pubkey: - try: - wireguard_manager.remove_peer(peer_pubkey) - except Exception as wg_err: - logger.warning(f"Peer {peer_name}: WireGuard removal failed (non-fatal): {wg_err}") - # Clean up all provisioned service accounts (best-effort) - for _cleanup in [ - lambda: email_manager.delete_email_user(peer_name, _configured_domain()), - lambda: calendar_manager.delete_calendar_user(peer_name), - lambda: file_manager.delete_user(peer_name), - lambda: auth_manager.delete_user(peer_name), - ]: - try: - _cleanup() - except Exception: - pass - return jsonify({"message": f"Peer {peer_name} removed successfully"}) - except Exception as e: - logger.error(f"Error removing peer: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/peers/register', methods=['POST']) -def register_peer(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = peer_registry.register_peer(data) - return jsonify(result) - except Exception as e: - logger.error(f"Error registering peer: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/peers/<peer_name>/unregister', methods=['DELETE']) -def unregister_peer(peer_name): - """Unregister a peer.""" - try: - result = peer_registry.unregister_peer(peer_name) - return jsonify(result) - except Exception as e: - logger.error(f"Error unregistering peer: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/peers/<peer_name>/update-ip', methods=['PUT']) -def 
update_peer_ip_registry(peer_name): - """Update peer IP.""" - try: - data = request.get_json(silent=True) - new_ip = data.get('ip') if data else None - if not new_ip: - return jsonify({"error": "Missing ip"}), 400 - success = peer_registry.update_peer_ip(peer_name, new_ip) - if success: - # Update routing and WireGuard configs - try: - routing_manager.update_peer_ip(peer_name, new_ip) - except Exception as e: - logger.warning(f"RoutingManager update_peer_ip failed: {e}") - try: - # For now, skip WireGuard update - method not implemented - logger.warning(f"WireGuardManager update_peer_ip not implemented yet") - except Exception as e: - logger.warning(f"WireGuardManager update_peer_ip failed: {e}") - return jsonify({"message": f"IP update received for {peer_name}"}) - else: - return jsonify({"error": f"Peer {peer_name} not found"}), 404 - except Exception as e: - logger.error(f"Error updating peer IP: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/ip-update', methods=['POST']) -def ip_update(): - """Handle IP update from peer.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - peer_name = data.get('peer') - new_ip = data.get('ip') - if not peer_name or not new_ip: - return jsonify({"error": "Missing peer or ip"}), 400 - success = peer_registry.update_peer_ip(peer_name, new_ip) - if success: - # Update routing and WireGuard configs - try: - routing_manager.update_peer_ip(peer_name, new_ip) - except Exception as e: - logger.warning(f"RoutingManager update_peer_ip failed: {e}") - try: - # For now, skip WireGuard update - method not implemented - logger.warning(f"WireGuardManager update_peer_ip not implemented yet") - except Exception as e: - logger.warning(f"WireGuardManager update_peer_ip failed: {e}") - return jsonify({"message": f"IP update received for {peer_name}"}) - else: - return jsonify({"error": f"Peer {peer_name} not found"}), 404 - except Exception as e: - 
logger.error(f"Error handling IP update: {e}") - return jsonify({"error": str(e)}), 500 - -# Email Services API -@app.route('/api/email/users', methods=['GET']) -def get_email_users(): - """Get email users.""" - try: - users = email_manager.get_users() - return jsonify(users) - except Exception as e: - logger.error(f"Error getting email users: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/users', methods=['POST']) -def create_email_user(): - """Create email user.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - domain = data.get('domain') or _configured_domain() - password = data.get('password') - if not username or not password: - return jsonify({"error": "Missing required fields: username, password"}), 400 - result = email_manager.create_email_user(username, domain, password) - return jsonify({"created": result}) - except Exception as e: - logger.error(f"Error creating email user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/users/<username>', methods=['DELETE']) -def delete_email_user(username): - """Delete email user.""" - try: - domain = request.args.get('domain') or _configured_domain() - result = email_manager.delete_email_user(username, domain) - return jsonify({"deleted": result}) - except Exception as e: - logger.error(f"Error deleting email user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/status', methods=['GET']) -def get_email_status(): - """Get email service status.""" - try: - status = email_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting email status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/connectivity', methods=['GET']) -def test_email_connectivity(): - """Test email connectivity.""" - try: - result = email_manager.test_connectivity() - return jsonify(result) - except Exception as 
e: - logger.error(f"Error testing email connectivity: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/send', methods=['POST']) -def send_email(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = email_manager.send_email(data) - return jsonify(result) - except Exception as e: - logger.error(f"Error sending email: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/email/mailbox/', methods=['GET']) -def get_mailbox_info(username): - """Get mailbox information.""" - try: - result = email_manager.get_mailbox_info(username) - return jsonify(result) - except Exception as e: - logger.error(f"Error getting mailbox info: {e}") - return jsonify({"error": str(e)}), 500 - -# Calendar Services API -@app.route('/api/calendar/users', methods=['GET']) -def get_calendar_users(): - """Get calendar users.""" - try: - users = calendar_manager.get_users() - return jsonify(users) - except Exception as e: - logger.error(f"Error getting calendar users: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/users', methods=['POST']) -def create_calendar_user(): - """Create calendar user.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - password = data.get('password') - if not username or not password: - return jsonify({"error": "Missing required fields: username, password"}), 400 - result = calendar_manager.create_calendar_user(username, password) - return jsonify({"created": result}) - except Exception as e: - logger.error(f"Error creating calendar user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/users/', methods=['DELETE']) -def delete_calendar_user(username): - """Delete calendar user.""" - try: - result = calendar_manager.delete_calendar_user(username) - return jsonify({"deleted": result}) - except Exception 
as e: - logger.error(f"Error deleting calendar user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/calendars', methods=['POST']) -def create_calendar(): - """Create calendar.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - calendar_name = data.get('name') or data.get('calendar_name') - if not username or not calendar_name: - return jsonify({"error": "Missing required fields: username, name"}), 400 - result = calendar_manager.create_calendar( - username, - calendar_name, - description=data.get('description', ''), - color=data.get('color', '#4285f4'), - ) - return jsonify({"created": result}) - except Exception as e: - logger.error(f"Error creating calendar: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/events', methods=['POST']) -def add_calendar_event(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - calendar_name = data.get('calendar_name') or data.get('calendar') - if not username or not calendar_name: - return jsonify({"error": "Missing required fields: username, calendar_name"}), 400 - event_data = {k: v for k, v in data.items() if k not in ('username', 'calendar_name', 'calendar')} - result = calendar_manager.add_event(username, calendar_name, event_data) - return jsonify({"created": result}) - except Exception as e: - logger.error(f"Error adding calendar event: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/events//', methods=['GET']) -def get_calendar_events(username, calendar_name): - """Get calendar events.""" - try: - params = request.args.to_dict() - result = calendar_manager.get_events(username, calendar_name, params) - return jsonify(result) - except Exception as e: - logger.error(f"Error getting calendar events: {e}") - return jsonify({"error": 
str(e)}), 500 - -@app.route('/api/calendar/status', methods=['GET']) -def get_calendar_status(): - """Get calendar service status.""" - try: - status = calendar_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting calendar status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/calendar/connectivity', methods=['GET']) -def test_calendar_connectivity(): - """Test calendar connectivity.""" - try: - result = calendar_manager.test_connectivity() - return jsonify(result) - except Exception as e: - logger.error(f"Error testing calendar connectivity: {e}") - return jsonify({"error": str(e)}), 500 - -# File Services API -@app.route('/api/files/users', methods=['GET']) -def get_file_users(): - """Get file storage users.""" - try: - users = file_manager.get_users() - return jsonify(users) - except Exception as e: - logger.error(f"Error getting file users: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/users', methods=['POST']) -def create_file_user(): - """Create file storage user.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - password = data.get('password') - if not username or not password: - return jsonify({"error": "Missing required fields: username, password"}), 400 - result = file_manager.create_user(username, password) - return jsonify({"created": result}) - except Exception as e: - logger.error(f"Error creating file user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/users/', methods=['DELETE']) -def delete_file_user(username): - """Delete file storage user.""" - try: - result = file_manager.delete_user(username) - return jsonify(result) - except Exception as e: - logger.error(f"Error deleting file user: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/folders', methods=['POST']) -def create_folder(): - """Create 
folder.""" - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - username = data.get('username') - folder_path = data.get('folder_path') or data.get('path') - if not username or not folder_path: - return jsonify({"error": "Missing required fields: username, folder_path"}), 400 - result = file_manager.create_folder(username, folder_path) - return jsonify({"created": result}) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error creating folder: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/folders//', methods=['DELETE']) -def delete_folder(username, folder_path): - """Delete folder.""" - try: - result = file_manager.delete_folder(username, folder_path) - return jsonify(result) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error deleting folder: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/upload/', methods=['POST']) -def upload_file(username): - """Upload file.""" - try: - if 'file' not in request.files: - return jsonify({"error": "No file provided"}), 400 - - file = request.files['file'] - path = request.form.get('path', '') or file.filename or '' - file_data = file.read() - - result = file_manager.upload_file(username, path, file_data) - return jsonify({"uploaded": result}) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error uploading file: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/download//', methods=['GET']) -def download_file(username, file_path): - """Download file.""" - try: - result = file_manager.download_file(username, file_path) - return jsonify(result) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error downloading file: {e}") - return jsonify({"error": 
str(e)}), 500 - -@app.route('/api/files/delete//', methods=['DELETE']) -def delete_file(username, file_path): - """Delete file.""" - try: - result = file_manager.delete_file(username, file_path) - return jsonify(result) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error deleting file: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/list/', methods=['GET']) -def list_files(username): - """List files.""" - try: - folder = request.args.get('folder', '') - result = file_manager.list_files(username, folder) - return jsonify(result) - except ValueError as e: - return jsonify({"error": str(e)}), 400 - except Exception as e: - logger.error(f"Error listing files: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/status', methods=['GET']) -def get_file_status(): - """Get file service status.""" - try: - status = file_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting file status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/files/connectivity', methods=['GET']) -def test_file_connectivity(): - """Test file service connectivity.""" - try: - result = file_manager.test_connectivity() - return jsonify(result) - except Exception as e: - logger.error(f"Error testing file connectivity: {e}") - return jsonify({"error": str(e)}), 500 - -# Routing API -@app.route('/api/routing/status', methods=['GET']) -def get_routing_status(): - """Get routing status.""" - try: - status = routing_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting routing status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/setup', methods=['POST']) -def setup_routing(): - """Apply/verify routing setup (WireGuard handles NAT via PostUp rules).""" - try: - status = routing_manager.get_status() - return jsonify({'success': True, 'message': 'Routing managed 
by WireGuard PostUp rules', **status}) - except Exception as e: - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/nat', methods=['POST']) -def add_nat_rule(): - """Add NAT rule. - JSON fields: - - source_network (CIDR) - - target_interface (str) - - masquerade (bool, default True) - - nat_type (MASQUERADE, SNAT, DNAT) - - protocol (TCP, UDP, ALL) - - external_port (str, optional) - - internal_ip (str, optional) - - internal_port (str, optional) - """ - try: - data = request.get_json(silent=True) or {} - result = routing_manager.add_nat_rule( - source_network=data.get('source_network'), - target_interface=data.get('target_interface'), - masquerade=data.get('masquerade', True), - nat_type=data.get('nat_type', 'MASQUERADE'), - protocol=data.get('protocol', 'ALL'), - external_port=data.get('external_port'), - internal_ip=data.get('internal_ip'), - internal_port=data.get('internal_port') - ) - return jsonify({'success': result}) - except Exception as e: - logger.error(f"Error adding NAT rule: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/nat/', methods=['DELETE']) -def remove_nat_rule(rule_id): - """Remove NAT rule.""" - try: - result = routing_manager.remove_nat_rule(rule_id) - return jsonify(result) - except Exception as e: - logger.error(f"Error removing NAT rule: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/peers', methods=['POST']) -def add_peer_route(): - """Add peer route.""" - try: - data = request.get_json(silent=True) or {} - peer_name = data.get('peer_name') - peer_ip = data.get('peer_ip') - allowed_networks = data.get('allowed_networks', []) - route_type = data.get('route_type', 'lan') - if not peer_name or not peer_ip: - return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400 - result = routing_manager.add_peer_route(peer_name, peer_ip, allowed_networks, route_type) - return jsonify({"added": result}) - except Exception as e: - logger.error(f"Error adding 
peer route: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/peers/', methods=['DELETE']) -def remove_peer_route(peer_name): - """Remove peer route.""" - try: - result = routing_manager.remove_peer_route(peer_name) - return jsonify(result) - except Exception as e: - logger.error(f"Error removing peer route: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/exit-nodes', methods=['POST']) -def add_exit_node(): - """Add exit node.""" - try: - data = request.get_json(silent=True) or {} - peer_name = data.get('peer_name') - peer_ip = data.get('peer_ip') - if not peer_name or not peer_ip: - return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400 - result = routing_manager.add_exit_node(peer_name, peer_ip, data.get('allowed_domains')) - return jsonify({"added": result}) - except Exception as e: - logger.error(f"Error adding exit node: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/bridge', methods=['POST']) -def add_bridge_route(): - """Add bridge route.""" - try: - data = request.get_json(silent=True) or {} - source_peer = data.get('source_peer') - target_peer = data.get('target_peer') - allowed_networks = data.get('allowed_networks', []) - if not source_peer or not target_peer: - return jsonify({"error": "Missing required fields: source_peer, target_peer"}), 400 - result = routing_manager.add_bridge_route(source_peer, target_peer, allowed_networks) - return jsonify({"added": result}) - except Exception as e: - logger.error(f"Error adding bridge route: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/split', methods=['POST']) -def add_split_route(): - """Add split route.""" - try: - data = request.get_json(silent=True) or {} - network = data.get('network') - exit_peer = data.get('exit_peer') - if not network or not exit_peer: - return jsonify({"error": "Missing required fields: network, exit_peer"}), 400 - result = 
routing_manager.add_split_route(network, exit_peer, data.get('fallback_peer')) - return jsonify({"added": result}) - except Exception as e: - logger.error(f"Error adding split route: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/firewall', methods=['POST']) -def add_firewall_rule(): - """Add firewall rule. - JSON fields: - - rule_type (INPUT, OUTPUT, FORWARD) - - source (CIDR) - - destination (CIDR) - - action (ACCEPT, DROP, REJECT) - - protocol (TCP, UDP, ICMP, ALL) - - port (str, optional) - - port_range (str, optional, e.g. '1000-2000') - """ - try: - data = request.get_json(silent=True) or {} - result = routing_manager.add_firewall_rule( - rule_type=data.get('rule_type'), - source=data.get('source'), - destination=data.get('destination'), - action=data.get('action', 'ACCEPT'), - port=data.get('port'), - protocol=data.get('protocol', 'ALL'), - port_range=data.get('port_range') - ) - return jsonify({'success': result}) - except Exception as e: - logger.error(f"Error adding firewall rule: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/firewall/', methods=['DELETE']) -def remove_firewall_rule(rule_id): - try: - result = routing_manager.remove_firewall_rule(rule_id) - return jsonify({'success': result}), (200 if result else 404) - except Exception as e: - return jsonify({'error': str(e)}), 500 - -@app.route('/api/routing/live-iptables', methods=['GET']) -def get_live_iptables(): - try: - return jsonify(routing_manager.get_live_iptables()) - except Exception as e: - return jsonify({'error': str(e)}), 500 - -@app.route('/api/routing/connectivity', methods=['POST']) -def test_routing_connectivity(): - """Test routing connectivity.""" - try: - data = request.get_json(silent=True) or {} - target_ip = data.get('target_ip', '8.8.8.8') - via_peer = data.get('via_peer') - result = routing_manager.test_routing_connectivity(target_ip, via_peer) - return jsonify(result) - except Exception as e: - logger.error(f"Error 
testing routing connectivity: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/logs', methods=['GET']) -def get_routing_logs(): - """Get routing logs.""" - try: - lines = request.args.get('lines', 50, type=int) - result = routing_manager.get_logs(lines) - return jsonify(result) - except Exception as e: - logger.error(f"Error getting routing logs: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/nat', methods=['GET']) -def get_nat_rules(): - """Get all NAT rules.""" - try: - rules = routing_manager.get_nat_rules() - return jsonify({"nat_rules": rules}) - except Exception as e: - logger.error(f"Error getting NAT rules: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/peers', methods=['GET']) -def get_peer_routes(): - """Get all peer routes.""" - try: - routes = routing_manager.get_peer_routes() - return jsonify({"peer_routes": routes}) - except Exception as e: - logger.error(f"Error getting peer routes: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/routing/firewall', methods=['GET']) -def get_firewall_rules(): - """Get all firewall rules.""" - try: - rules = routing_manager.get_firewall_rules() - return jsonify({"firewall_rules": rules}) - except Exception as e: - logger.error(f"Error getting firewall rules: {e}") - return jsonify({"error": str(e)}), 500 - -# Vault & Trust API (Phase 6) -@app.route('/api/vault/status', methods=['GET']) -def get_vault_status(): - """Get vault status.""" - try: - status = current_app.vault_manager.get_status() - return jsonify(status) - except Exception as e: - logger.error(f"Error getting vault status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/certificates', methods=['GET']) -def get_certificates(): - """Get all certificates.""" - try: - certificates = current_app.vault_manager.list_certificates() - return jsonify(certificates) - except Exception as e: - logger.error(f"Error getting certificates: {e}") - 
return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/certificates', methods=['POST']) -def generate_certificate(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = current_app.vault_manager.generate_certificate( - common_name=data['common_name'], - domains=data.get('domains', []), - key_size=data.get('key_size', 2048), - days=data.get('days', 365) - ) - return jsonify(result) - except Exception as e: - logger.error(f"Error generating certificate: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/certificates/', methods=['DELETE']) -def revoke_certificate(common_name): - """Revoke certificate.""" - try: - result = current_app.vault_manager.revoke_certificate(common_name) - return jsonify({"revoked": result}) - except Exception as e: - logger.error(f"Error revoking certificate: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/ca/certificate', methods=['GET']) -def get_ca_certificate(): - """Get CA certificate.""" - try: - cert = current_app.vault_manager.get_ca_certificate() - return jsonify({"certificate": cert}) - except Exception as e: - logger.error(f"Error getting CA certificate: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/age/public-key', methods=['GET']) -def get_age_public_key(): - """Get Age public key.""" - try: - key = current_app.vault_manager.get_age_public_key() - return jsonify({"public_key": key}) - except Exception as e: - logger.error(f"Error getting Age public key: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/trust/keys', methods=['GET']) -def get_trusted_keys(): - """Get trusted keys.""" - try: - keys = current_app.vault_manager.get_trusted_keys() - return jsonify(keys) - except Exception as e: - logger.error(f"Error getting trusted keys: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/trust/keys', methods=['POST']) -def 
add_trusted_key(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = current_app.vault_manager.add_trusted_key( - name=data['name'], - public_key=data['public_key'], - trust_level=data.get('trust_level', 'direct') - ) - return jsonify({"added": result}) - except Exception as e: - logger.error(f"Error adding trusted key: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/trust/keys/', methods=['DELETE']) -def remove_trusted_key(name): - """Remove trusted key.""" - try: - result = current_app.vault_manager.remove_trusted_key(name) - return jsonify({"removed": result}) - except Exception as e: - logger.error(f"Error removing trusted key: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/trust/verify', methods=['POST']) -def verify_trust_chain(): - try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = current_app.vault_manager.verify_trust_chain( - peer_name=data['peer_name'], - signature=data['signature'], - data=data['data'] - ) - return jsonify({"verified": result}) - except Exception as e: - logger.error(f"Error verifying trust chain: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/vault/trust/chains', methods=['GET']) -def get_trust_chains(): - """Get trust chains.""" - try: - chains = current_app.vault_manager.get_trust_chains() - return jsonify(chains) - except Exception as e: - logger.error(f"Error getting trust chains: {e}") - return jsonify({"error": str(e)}), 500 - -# Services API -@app.route('/api/services/status', methods=['GET']) -def get_all_services_status(): - """Get status of all services.""" - try: - # Use service bus to get status from all services - services_status = {} - for service_name in service_bus.list_services(): - try: - service = service_bus.get_service(service_name) - status = service.get_status() - - # Clean up status for UI 
consumption - if isinstance(status, dict): - # Extract core status information - clean_status = { - 'status': status.get('status', 'unknown'), - 'running': status.get('running', False), - 'timestamp': status.get('timestamp', datetime.utcnow().isoformat()) - } - - # Add service-specific metrics - if service_name == 'network': - clean_status.update({ - 'dns_status': status.get('dns_running', False), - 'dhcp_status': status.get('dhcp_running', False), - 'ntp_status': status.get('ntp_running', False) - }) - elif service_name == 'wireguard': - clean_status.update({ - 'peers_count': status.get('peers_count', 0), - 'interface': status.get('interface', 'unknown') - }) - elif service_name == 'email': - clean_status.update({ - 'users_count': status.get('users_count', 0), - 'domain': status.get('domain', 'unknown') - }) - elif service_name == 'calendar': - clean_status.update({ - 'users_count': status.get('users_count', 0), - 'calendars_count': status.get('calendars_count', 0) - }) - elif service_name == 'files': - clean_status.update({ - 'users_count': status.get('users_count', 0), - 'storage_used': status.get('total_storage_used', {}) - }) - elif service_name == 'routing': - clean_status.update({ - 'nat_rules_count': status.get('nat_rules_count', 0), - 'peer_routes_count': status.get('peer_routes_count', 0), - 'firewall_rules_count': status.get('firewall_rules_count', 0) - }) - elif service_name == 'vault': - clean_status.update({ - 'certificates_count': status.get('certificates_count', 0), - 'trusted_keys_count': status.get('trusted_keys_count', 0) - }) - - services_status[service_name] = clean_status - else: - services_status[service_name] = {'status': str(status), 'running': bool(status)} - - except Exception as e: - services_status[service_name] = {'error': str(e), 'status': 'offline', 'running': False} - - return jsonify({ - "network": services_status.get('network', {}), - "wireguard": services_status.get('wireguard', {}), - "email": services_status.get('email', {}), - 
"calendar": services_status.get('calendar', {}), - "files": services_status.get('files', {}), - "routing": services_status.get('routing', {}), - "vault": services_status.get('vault', {}), - "timestamp": datetime.utcnow().isoformat() - }) - except Exception as e: - logger.error(f"Error getting all services status: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/services/connectivity', methods=['GET']) -def test_all_services_connectivity(): - """Test connectivity of all services.""" - try: - # Use service bus to test connectivity - connectivity_results = {} - for service_name in service_bus.list_services(): - try: - service = service_bus.get_service(service_name) - if hasattr(service, 'test_connectivity'): - connectivity_results[service_name] = service.test_connectivity() - else: - connectivity_results[service_name] = {'status': 'ok', 'message': 'No connectivity test available'} - except Exception as e: - connectivity_results[service_name] = {'status': 'error', 'message': str(e)} - - return jsonify({ - "network": connectivity_results.get('network', {}), - "wireguard": connectivity_results.get('wireguard', {}), - "email": connectivity_results.get('email', {}), - "calendar": connectivity_results.get('calendar', {}), - "files": connectivity_results.get('files', {}), - "routing": connectivity_results.get('routing', {}), - "timestamp": datetime.utcnow().isoformat() - }) - except Exception as e: - logger.error(f"Error testing all services connectivity: {e}") - return jsonify({"error": str(e)}), 500 - @app.route('/api/health/history', methods=['GET']) def get_health_history(): """Get recent unified health check results.""" @@ -2996,345 +585,6 @@ def clear_health_history(): service_alert_counters = {} return jsonify({'message': 'Health history cleared'}) -@app.route('/api/logs', methods=['GET']) -def get_backend_logs(): - """Get backend log file contents (last N lines).""" - log_file = os.path.join(os.path.dirname(__file__), 'picell.log') - lines = 
int(request.args.get('lines', 100)) - try: - if not os.path.exists(log_file): - return jsonify({"error": "Log file not found."}), 404 - with open(log_file, 'r', encoding='utf-8', errors='ignore') as f: - all_lines = f.readlines() - tail_lines = all_lines[-lines:] if lines > 0 else all_lines - return jsonify({"log": ''.join(tail_lines)}) - except Exception as e: - logger.error(f"Error reading log file: {e}") - return jsonify({"error": str(e)}), 500 - -@app.route('/api/containers', methods=['GET']) -def list_containers(): - if not is_local_request(): - return jsonify({'error': 'Access denied'}), 403 - try: - containers = container_manager.list_containers() - return jsonify(containers) - except Exception as e: - logger.error(f"Error listing containers: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/containers//start', methods=['POST']) -def start_container(name): - if not is_local_request(): - return jsonify({'error': 'Access denied'}), 403 - try: - success = container_manager.start_container(name) - return jsonify({'started': success}) - except Exception as e: - logger.error(f"Error starting container {name}: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/containers//stop', methods=['POST']) -def stop_container(name): - if not is_local_request(): - return jsonify({'error': 'Access denied'}), 403 - try: - success = container_manager.stop_container(name) - return jsonify({'stopped': success}) - except Exception as e: - logger.error(f"Error stopping container {name}: {e}") - return jsonify({'error': str(e)}), 500 - -@app.route('/api/containers//restart', methods=['POST']) -def restart_container(name): - if not is_local_request(): - return jsonify({'error': 'Access denied'}), 403 - try: - success = container_manager.restart_container(name) - return jsonify({'restarted': success}) - except Exception as e: - logger.error(f"Error restarting container {name}: {e}") - return jsonify({'error': str(e)}), 500 - 
-@app.route('/api/containers/<name>/logs', methods=['GET'])
-def get_container_logs(name):
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    tail = request.args.get('tail', default=100, type=int)
-    try:
-        logs = container_manager.get_container_logs(name, tail=tail)
-        return jsonify({'logs': logs})
-    except Exception as e:
-        logger.error(f"Error getting logs for container {name}: {e}")
-        return jsonify({'error': str(e)}), 500
-
-@app.route('/api/containers/<name>/stats', methods=['GET'])
-def get_container_stats(name):
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    try:
-        stats = container_manager.get_container_stats(name)
-        return jsonify(stats)
-    except Exception as e:
-        logger.error(f"Error getting stats for container {name}: {e}")
-        return jsonify({'error': str(e)}), 500
-
-@app.route('/api/vault/secrets', methods=['GET'])
-def list_secrets():
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    secrets = app.vault_manager.list_secrets()
-    return jsonify({'secrets': secrets})
-
-@app.route('/api/vault/secrets', methods=['POST'])
-def store_secret():
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    data = request.get_json(silent=True)
-    if not data or 'name' not in data or 'value' not in data:
-        return jsonify({'error': 'Missing name or value'}), 400
-    app.vault_manager.store_secret(data['name'], data['value'])
-    return jsonify({'stored': True})
-
-@app.route('/api/vault/secrets/<name>', methods=['GET'])
-def get_secret(name):
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    value = app.vault_manager.get_secret(name)
-    if value is None:
-        return jsonify({'error': 
'Not found'}), 404
-    return jsonify({'name': name, 'value': value})
-
-@app.route('/api/vault/secrets/<name>', methods=['DELETE'])
-def delete_secret(name):
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    result = app.vault_manager.delete_secret(name)
-    return jsonify({'deleted': result})
-
-# Enhance container creation to support secrets
-@app.route('/api/containers', methods=['POST'])
-def create_container():
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    data = request.get_json(silent=True)
-    if not data or 'image' not in data:
-        return jsonify({'error': 'Missing image parameter'}), 400
-    name = data.get('name', '')
-    env = data.get('env', {})
-    # If 'secrets' is provided, resolve secret values and add to env
-    secrets = data.get('secrets', [])
-    if secrets:
-        for secret_name in secrets:
-            secret_value = app.vault_manager.get_secret(secret_name)
-            if secret_value is not None:
-                env[secret_name] = secret_value
-    volumes = data.get('volumes', {})
-    command = data.get('command', '')
-    ports = data.get('ports', {})
-    if volumes:
-        allowed_prefixes = ('/home/roof/pic/data/', '/home/roof/pic/config/', '/tmp/')
-        for host_path in volumes.keys():
-            resolved = os.path.realpath(str(host_path))
-            if not any(resolved.startswith(p) for p in allowed_prefixes):
-                return jsonify({'error': f'Volume mount not allowed: {host_path}'}), 403
-    result = container_manager.create_container(
-        image=data['image'],
-        name=name,
-        env=env,
-        volumes=volumes,
-        command=command,
-        ports=ports
-    )
-    if 'error' in result:
-        return jsonify(result), 500
-    return jsonify(result)
-
-@app.route('/api/containers/<name>', methods=['DELETE'])
-def remove_container(name):
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    force = request.args.get('force', 
default=False, type=bool)
-    success = container_manager.remove_container(name, force=force)
-    return jsonify({'removed': success})
-
-@app.route('/api/images', methods=['GET'])
-def list_images():
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    images = container_manager.list_images()
-    return jsonify(images)
-
-@app.route('/api/images/pull', methods=['POST'])
-def pull_image():
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    data = request.get_json(silent=True)
-    if not data or 'image' not in data:
-        return jsonify({'error': 'Missing image parameter'}), 400
-    result = container_manager.pull_image(data['image'])
-    if 'error' in result:
-        return jsonify(result), 500
-    return jsonify(result)
-
-@app.route('/api/images/<image>', methods=['DELETE'])
-def remove_image(image):
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    force = request.args.get('force', default=False, type=bool)
-    success = container_manager.remove_image(image, force=force)
-    return jsonify({'removed': success})
-
-@app.route('/api/volumes', methods=['GET'])
-def list_volumes():
-    # Temporarily disable access control for debugging
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    volumes = container_manager.list_volumes()
-    return jsonify(volumes)
-
-@app.route('/api/volumes', methods=['POST'])
-def create_volume():
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    data = request.get_json(silent=True)
-    if not data or 'name' not in data:
-        return jsonify({'error': 'Missing name parameter'}), 400
-    result = container_manager.create_volume(data['name'])
-    if 'error' in result:
-        return jsonify(result), 500
-    return jsonify(result)
-
-@app.route('/api/volumes/<name>', methods=['DELETE'])
-def remove_volume(name):
-    if not is_local_request():
-        return jsonify({'error': 'Access denied'}), 403
-    force = 
request.args.get('force', default=False, type=bool) - success = container_manager.remove_volume(name, force=force) - return jsonify({'removed': success}) - - - -# ── Peer-scoped routes (/api/peer/*) ───────────────────────────────────────── -# These routes are accessible to peer-role sessions only (enforced by -# the enforce_auth before_request hook above). - -@app.route('/api/peer/dashboard', methods=['GET']) -def peer_dashboard(): - """Return dashboard info for the authenticated peer including live WireGuard stats.""" - peer_name = session.get('peer_name') - peer = peer_registry.get_peer(peer_name) if peer_name else None - if not peer: - return jsonify({'error': 'Peer not found'}), 404 - - wg_stats = {'online': None, 'transfer_rx': 0, 'transfer_tx': 0, 'last_handshake': None} - public_key = peer.get('public_key') - if public_key: - try: - wg_stats = wireguard_manager.get_peer_status(public_key) - except Exception: - pass - - peer_ip = peer.get('ip', '') - allowed_ips = f"{peer_ip.split('/')[0]}/32" if peer_ip else '' - domain = _configured_domain() - _svc_url_map = { - 'calendar': f'http://calendar.{domain}', - 'files': f'http://files.{domain}', - 'mail': f'http://mail.{domain}', - 'webdav': f'http://webdav.{domain}', - } - service_urls = { - svc: _svc_url_map[svc] - for svc in peer.get('service_access', []) - if svc in _svc_url_map - } - - return jsonify({ - 'name': peer_name, - 'ip': peer_ip, - 'service_access': peer.get('service_access', []), - 'service_urls': service_urls, - 'online': wg_stats.get('online'), - 'transfer_rx': wg_stats.get('transfer_rx', 0), - 'transfer_tx': wg_stats.get('transfer_tx', 0), - 'last_handshake': wg_stats.get('last_handshake'), - 'allowed_ips': peer.get('allowed_ips', allowed_ips), - }) - - -@app.route('/api/peer/services', methods=['GET']) -def peer_services(): - """Return service credentials and access info for the authenticated peer.""" - peer_name = session.get('peer_name') - peer = peer_registry.get_peer(peer_name) if 
peer_name else None - if not peer: - return jsonify({'error': 'Peer not found'}), 404 - - domain = _configured_domain() - peer_ip = peer.get('ip', '') - - server_public_key = '' - wg_port = 51820 - server_endpoint = '' - try: - server_public_key = wireguard_manager.get_keys().get('public_key', '') - wg_port = config_manager.configs.get('_identity', {}).get('wireguard_port', 51820) - srv = wireguard_manager.get_server_config() - server_endpoint = srv.get('endpoint') or '' - except Exception: - pass - - wg_config = '' - peer_private_key = peer.get('private_key', '') - if peer_private_key: - try: - internet_access = peer.get('internet_access', True) - allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips() - wg_config = wireguard_manager.get_peer_config( - peer_name=peer_name, - peer_ip=peer_ip, - peer_private_key=peer_private_key, - server_endpoint=server_endpoint, - allowed_ips=allowed_ips, - ) - except Exception: - pass - - return jsonify({ - 'username': peer_name, - 'wireguard': { - 'ip': peer_ip, - 'server_public_key': server_public_key, - 'endpoint_port': wg_port, - 'dns': _resolve_peer_dns(), - 'config': wg_config, - }, - 'email': { - 'address': f'{peer_name}@{domain}', - 'smtp': {'host': f'mail.{domain}', 'port': 587}, - 'imap': {'host': f'mail.{domain}', 'port': 993}, - }, - 'caldav': { - 'url': f'http://calendar.{domain}', - 'username': peer_name, - }, - 'files': { - 'url': f'http://files.{domain}', - 'username': peer_name, - }, - }) - - if __name__ == '__main__': debug = os.environ.get('FLASK_DEBUG', '0') == '1' app.run(host='0.0.0.0', port=3000, debug=debug) \ No newline at end of file diff --git a/api/cell_link_manager.py b/api/cell_link_manager.py index 511a8fd..0757274 100644 --- a/api/cell_link_manager.py +++ b/api/cell_link_manager.py @@ -5,6 +5,7 @@ CellLinkManager — manages site-to-site connections between PIC cells. 
Each connection is stored in data/cell_links.json and manifests as: - A WireGuard [Peer] block (AllowedIPs = remote cell's VPN subnet) - A CoreDNS forwarding block (remote domain → remote cell's DNS IP) + - An iptables FORWARD rule set (service-level access control) """ import os @@ -15,6 +16,20 @@ from typing import Any, Dict, List, Optional logger = logging.getLogger(__name__) +VALID_SERVICES = ('calendar', 'files', 'mail', 'webdav') + +_DEFAULT_PERMISSIONS = { + 'inbound': {s: False for s in VALID_SERVICES}, + 'outbound': {s: False for s in VALID_SERVICES}, +} + + +def _default_perms() -> Dict[str, Any]: + return { + 'inbound': {s: False for s in VALID_SERVICES}, + 'outbound': {s: False for s in VALID_SERVICES}, + } + class CellLinkManager: def __init__(self, data_dir: str, config_dir: str, wireguard_manager, network_manager): @@ -30,7 +45,16 @@ class CellLinkManager: if os.path.exists(self.links_file): try: with open(self.links_file) as f: - return json.load(f) + links = json.load(f) + # Lazy migration: inject permissions field if missing + changed = False + for link in links: + if 'permissions' not in link: + link['permissions'] = _default_perms() + changed = True + if changed: + self._save(links) + return links except Exception: return [] return [] @@ -59,8 +83,13 @@ class CellLinkManager: def list_connections(self) -> List[Dict[str, Any]]: return self._load() - def add_connection(self, invite: Dict[str, Any]) -> Dict[str, Any]: - """Import a remote cell's invite and establish the connection.""" + def add_connection(self, invite: Dict[str, Any], + inbound_services: Optional[List[str]] = None) -> Dict[str, Any]: + """Import a remote cell's invite and establish the connection. + + inbound_services: which of THIS cell's services to share with the remote + cell immediately. Defaults to none (all-deny). 
+ """ links = self._load() name = invite['cell_name'] if any(l['cell_name'] == name for l in links): @@ -82,6 +111,11 @@ class CellLinkManager: if dns_result.get('warnings'): logger.warning('DNS forward warnings for %s: %s', name, dns_result['warnings']) + inbound = [s for s in (inbound_services or []) if s in VALID_SERVICES] + perms = _default_perms() + for s in inbound: + perms['inbound'][s] = True + link = { 'cell_name': name, 'public_key': invite['public_key'], @@ -90,9 +124,18 @@ class CellLinkManager: 'dns_ip': invite['dns_ip'], 'domain': invite['domain'], 'connected_at': datetime.utcnow().isoformat(), + 'permissions': perms, } links.append(link) self._save(links) + + # Apply iptables rules for the new cell (non-fatal if it fails) + try: + import firewall_manager as _fm + _fm.apply_cell_rules(name, invite['vpn_subnet'], inbound) + except Exception as e: + logger.warning(f"apply_cell_rules for {name} failed (non-fatal): {e}") + return link def remove_connection(self, cell_name: str): @@ -102,12 +145,56 @@ class CellLinkManager: if not link: raise ValueError(f"Cell '{cell_name}' not found") + # Clear firewall rules first (non-fatal) + try: + import firewall_manager as _fm + _fm.clear_cell_rules(cell_name) + except Exception as e: + logger.warning(f"clear_cell_rules for {cell_name} failed (non-fatal): {e}") + self.wireguard_manager.remove_peer(link['public_key']) self.network_manager.remove_cell_dns_forward(link['domain']) links = [l for l in links if l['cell_name'] != cell_name] self._save(links) + def update_permissions(self, cell_name: str, + inbound: Dict[str, bool], + outbound: Dict[str, bool]) -> Dict[str, Any]: + """Update service sharing permissions for a cell connection. + + Validates service names, persists, and re-applies iptables rules. + Returns the updated link record. 
+ """ + links = self._load() + link = next((l for l in links if l['cell_name'] == cell_name), None) + if not link: + raise ValueError(f"Cell '{cell_name}' not found") + + # Validate and normalise — only known services, boolean values + clean_inbound = {s: bool(inbound.get(s, False)) for s in VALID_SERVICES} + clean_outbound = {s: bool(outbound.get(s, False)) for s in VALID_SERVICES} + link['permissions'] = {'inbound': clean_inbound, 'outbound': clean_outbound} + self._save(links) + + # Re-apply firewall rules + inbound_list = [s for s, v in clean_inbound.items() if v] + try: + import firewall_manager as _fm + _fm.apply_cell_rules(cell_name, link['vpn_subnet'], inbound_list) + except Exception as e: + logger.warning(f"apply_cell_rules for {cell_name} failed (non-fatal): {e}") + + return link + + def get_permissions(self, cell_name: str) -> Dict[str, Any]: + """Return the permissions dict for a connected cell.""" + links = self._load() + link = next((l for l in links if l['cell_name'] == cell_name), None) + if not link: + raise ValueError(f"Cell '{cell_name}' not found") + return link.get('permissions', _default_perms()) + def get_connection_status(self, cell_name: str) -> Dict[str, Any]: """Return link record enriched with live WireGuard handshake status.""" links = self._load() diff --git a/api/config_manager.py b/api/config_manager.py index 763dad6..9d8bdc3 100644 --- a/api/config_manager.py +++ b/api/config_manager.py @@ -235,6 +235,20 @@ class ConfigManager: for zone_file in dns_data.glob('*.zone'): shutil.copy2(zone_file, zones_dir / zone_file.name) + # Service-specific user account files (authoritative source of truth — + # cell_config.json only carries a best-effort sync of these). 
+ svc_user_files = [ + (data_dir / 'email' / 'users.json', 'email_users.json'), + (data_dir / 'calendar' / 'users.json', 'calendar_users.json'), + (data_dir / 'calendar' / 'calendars.json', 'calendar_calendars.json'), + ] + for src, dest_name in svc_user_files: + if src.exists(): + try: + shutil.copy2(src, backup_path / dest_name) + except (PermissionError, OSError) as e: + logger.warning(f"Could not back up {src.name}: {e} (skipping)") + services = ['identity'] + list(self.service_schemas.keys()) manifest = { "backup_id": backup_id, @@ -316,6 +330,20 @@ class ConfigManager: except (PermissionError, OSError) as dir_err: logger.warning(f"Could not create dns data dir {dns_data}: {dir_err} (skipping)") + # Service-specific user account files + svc_restore_map = [ + (backup_path / 'email_users.json', data_dir / 'email' / 'users.json'), + (backup_path / 'calendar_users.json', data_dir / 'calendar' / 'users.json'), + (backup_path / 'calendar_calendars.json', data_dir / 'calendar' / 'calendars.json'), + ] + for src, dest in svc_restore_map: + if src.exists(): + try: + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(src, dest) + except (PermissionError, OSError) as e: + logger.warning(f"Could not restore {dest.name}: {e} (skipping)") + self.configs = self._load_all_configs() logger.info(f"Restored configuration from backup: {backup_id}") return True diff --git a/api/firewall_manager.py b/api/firewall_manager.py index bf55c31..24d1709 100644 --- a/api/firewall_manager.py +++ b/api/firewall_manager.py @@ -221,6 +221,83 @@ def apply_all_peer_rules(peers: List[Dict[str, Any]]) -> None: }) +# --------------------------------------------------------------------------- +# Cell-to-cell firewall rules +# --------------------------------------------------------------------------- + +def _cell_tag(cell_name: str) -> str: + """iptables comment tag for cell rules — distinct prefix from pic-peer-* to prevent collision.""" + safe = re.sub(r'[^a-z0-9]', '-', 
cell_name.lower()) + return f'pic-cell-{safe}' + + +def clear_cell_rules(cell_name: str) -> None: + """Remove all FORWARD rules tagged for this cell (atomic save/restore).""" + tag = _cell_tag(cell_name) + comment_re = re.compile(rf'--comment\s+["\']?{re.escape(tag)}["\']?(\s|$)') + try: + save = _wg_exec(['iptables-save']) + if save.returncode != 0: + return + lines = save.stdout.splitlines() + filtered = [l for l in lines if not comment_re.search(l)] + if len(filtered) == len(lines): + return + restore_input = '\n'.join(filtered) + '\n' + restore = subprocess.run( + ['docker', 'exec', '-i', WIREGUARD_CONTAINER, 'iptables-restore'], + input=restore_input, capture_output=True, text=True, timeout=10 + ) + if restore.returncode != 0: + logger.warning(f"clear_cell_rules iptables-restore failed: {restore.stderr.strip()}") + except Exception as e: + logger.error(f"clear_cell_rules({cell_name}): {e}") + + +def apply_cell_rules(cell_name: str, vpn_subnet: str, inbound_services: List[str]) -> bool: + """Apply FORWARD rules for a cell-to-cell peer. + + Traffic from vpn_subnet is allowed only to service VIPs listed in + inbound_services; all other cell traffic is DROPped. Cells get no + internet or peer access — only explicit service VIPs. + + Rule insertion order (last inserted → top of chain): + 1. Catch-all DROP for the subnet (inserted first → bottom) + 2. 
Per-service ACCEPT/DROP (inserted in reversed() order → top) + """ + try: + tag = _cell_tag(cell_name) + clear_cell_rules(cell_name) + + # Catch-all DROP — inserted first so it ends up at the bottom + _iptables(['-I', 'FORWARD', '-s', vpn_subnet, + '-m', 'comment', '--comment', tag, '-j', 'DROP']) + + # Per-service rules — inserted in reverse dict order, highest-priority last + for service, svc_ip in reversed(list(SERVICE_IPS.items())): + target = 'ACCEPT' if service in inbound_services else 'DROP' + _iptables(['-I', 'FORWARD', '-s', vpn_subnet, '-d', svc_ip, + '-m', 'comment', '--comment', tag, '-j', target]) + + logger.info(f"Applied cell rules for {cell_name} ({vpn_subnet}): inbound={inbound_services}") + return True + except Exception as e: + logger.error(f"apply_cell_rules({cell_name}): {e}") + return False + + +def apply_all_cell_rules(cell_links: List[Dict[str, Any]]) -> None: + """Re-apply firewall rules for all cell connections (called on startup).""" + for link in cell_links: + name = link.get('cell_name') + subnet = link.get('vpn_subnet') + if not name or not subnet: + continue + perms = link.get('permissions', {}) + inbound = [s for s, v in perms.get('inbound', {}).items() if v] + apply_cell_rules(name, subnet, inbound) + + # --------------------------------------------------------------------------- # DNS ACL (CoreDNS Corefile generation) # --------------------------------------------------------------------------- diff --git a/api/managers.py b/api/managers.py new file mode 100644 index 0000000..a42bee2 --- /dev/null +++ b/api/managers.py @@ -0,0 +1,92 @@ +""" +Manager singletons for the PIC API. + +All service managers are instantiated here and imported by app.py. Routes in +app.py reference these by name from app's own namespace (so test patches via +`patch('app.email_manager', mock)` continue to work as before). 
+ +Directory/path env vars: + DATA_DIR — host-mapped persistent data directory (default: /app/data) + CONFIG_DIR — host-mapped config directory (default: /app/config) +""" + +import os + +from network_manager import NetworkManager +from wireguard_manager import WireGuardManager +from peer_registry import PeerRegistry +from email_manager import EmailManager +from calendar_manager import CalendarManager +from file_manager import FileManager +from routing_manager import RoutingManager +from vault_manager import VaultManager +from container_manager import ContainerManager +from config_manager import ConfigManager +from service_bus import ServiceBus, EventType +from log_manager import LogManager +from cell_link_manager import CellLinkManager +import firewall_manager +from auth_manager import AuthManager + +DATA_DIR = os.environ.get('DATA_DIR', '/app/data') +CONFIG_DIR = os.environ.get('CONFIG_DIR', '/app/config') + +config_manager = ConfigManager( + config_file=os.path.join(CONFIG_DIR, 'cell_config.json'), + data_dir=DATA_DIR, +) +service_bus = ServiceBus() +log_manager = LogManager(log_dir='./data/logs') + +network_manager = NetworkManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +wireguard_manager = WireGuardManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +peer_registry = PeerRegistry(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +email_manager = EmailManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +calendar_manager = CalendarManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +file_manager = FileManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +routing_manager = RoutingManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +vault_manager = VaultManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +container_manager = ContainerManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) +cell_link_manager = CellLinkManager( + data_dir=DATA_DIR, config_dir=CONFIG_DIR, + wireguard_manager=wireguard_manager, + network_manager=network_manager, +) +auth_manager = 
AuthManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR) + +# Service logger configuration +_service_log_configs = { + 'network': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'wireguard': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'email': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'calendar': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'files': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'routing': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'vault': {'level': 'INFO', 'formatter': 'json', 'console': False}, + 'api': {'level': 'INFO', 'formatter': 'json', 'console': True}, +} +for _svc, _cfg in _service_log_configs.items(): + log_manager.add_service_logger(_svc, _cfg) + +# Apply any persisted log level overrides +import json as _json +_levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json') +if os.path.exists(_levels_file): + try: + with open(_levels_file) as _lf: + for _s, _l in _json.load(_lf).items(): + log_manager.set_service_level(_s, _l) + except Exception: + pass + +service_bus.start() + +__all__ = [ + 'config_manager', 'service_bus', 'log_manager', + 'network_manager', 'wireguard_manager', 'peer_registry', + 'email_manager', 'calendar_manager', 'file_manager', + 'routing_manager', 'vault_manager', 'container_manager', + 'cell_link_manager', 'auth_manager', + 'firewall_manager', 'EventType', + 'DATA_DIR', 'CONFIG_DIR', +] diff --git a/api/network_manager.py b/api/network_manager.py index 5cdaf6d..ba1d1bb 100644 --- a/api/network_manager.py +++ b/api/network_manager.py @@ -476,12 +476,18 @@ class NetworkManager(BaseServiceManager): if os.path.exists(src): with open(src) as f: zone_content = f.read() + # Try $ORIGIN first, then fall back to SOA MNAME m = re.search(r'^\$ORIGIN\s+(\S+)', zone_content, re.MULTILINE) - old_origin = m.group(1).rstrip('.') if m else None + if m: + old_origin = m.group(1).rstrip('.') + else: + m2 = 
re.search(r'^@\s+IN\s+SOA\s+(\S+?)\.?\s', zone_content, re.MULTILINE) + old_origin = m2.group(1).rstrip('.') if m2 else None if old_origin and old_origin != domain: zone_content = zone_content.replace(f'{old_origin}.', f'{domain}.') - zone_content = re.sub( - r'^\$ORIGIN\s+\S+', f'$ORIGIN {domain}.', zone_content, flags=re.MULTILINE) + if re.search(r'^\$ORIGIN\s+', zone_content, re.MULTILINE): + zone_content = re.sub( + r'^\$ORIGIN\s+\S+', f'$ORIGIN {domain}.', zone_content, flags=re.MULTILINE) with open(dst, 'w') as f: f.write(zone_content) for zone_path in zone_files: @@ -507,11 +513,15 @@ class NetworkManager(BaseServiceManager): """Update the cell hostname record in the primary DNS zone file. reload=False writes the zone file only — use when deferring container restart. + old_name is a hint; if it's absent from the zone file, we detect the actual + hostname by finding the non-service A record pointing to the Caddy IP. """ restarted = [] warnings = [] - if not old_name or not new_name or old_name == new_name: + if not new_name: return {'restarted': restarted, 'warnings': warnings} + _service_names = {'api', 'webui', 'calendar', 'files', 'mail', 'webmail', 'webdav'} + changed = False try: dns_data = os.path.join(self.data_dir, 'dns') if os.path.isdir(dns_data): @@ -520,16 +530,33 @@ class NetworkManager(BaseServiceManager): zone_file = os.path.join(dns_data, fname) with open(zone_file) as f: content = f.read() - # Match name with optional TTL: "name [ttl] IN A value" - content = re.sub( - rf'^{re.escape(old_name)}(\s+(?:\d+\s+)?IN\s+A\s+)', + # Determine which name to replace: prefer old_name if present, + # otherwise detect from zone (non-service A record not in _service_names) + actual_old = old_name if ( + old_name and re.search( + rf'^{re.escape(old_name)}\s', content, re.MULTILINE) + ) else None + if actual_old is None: + for m in re.finditer( + r'^(\S+)\s+(?:\d+\s+)?IN\s+A\s+\S+', content, re.MULTILINE + ): + candidate = m.group(1) + if candidate not in 
_service_names and candidate != '@': + actual_old = candidate + break + if actual_old is None or actual_old == new_name: + break + new_content = re.sub( + rf'^{re.escape(actual_old)}(\s+(?:\d+\s+)?IN\s+A\s+)', f'{new_name}\\1', content, flags=re.MULTILINE ) - with open(zone_file, 'w') as f: - f.write(content) + if new_content != content: + with open(zone_file, 'w') as f: + f.write(new_content) + changed = True break - if reload: + if changed and reload: self._reload_dns_service() restarted.append('cell-dns (reloaded)') except Exception as e: diff --git a/api/routes/__init__.py b/api/routes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/api/routes/calendar.py b/api/routes/calendar.py new file mode 100644 index 0000000..8952401 --- /dev/null +++ b/api/routes/calendar.py @@ -0,0 +1,119 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('calendar', __name__) + +@bp.route('/api/calendar/users', methods=['GET']) +def get_calendar_users(): + """Get calendar users.""" + try: + from app import calendar_manager + users = calendar_manager.get_users() + return jsonify(users) + except Exception as e: + logger.error(f"Error getting calendar users: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/calendar/users', methods=['POST']) +def create_calendar_user(): + """Create calendar user.""" + try: + from app import calendar_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + username = data.get('username') + password = data.get('password') + if not username or not password: + return jsonify({"error": "Missing required fields: username, password"}), 400 + result = calendar_manager.create_calendar_user(username, password) + return jsonify({"created": result}) + except Exception as e: + logger.error(f"Error creating calendar user: {e}") + return jsonify({"error": str(e)}), 500 + 
+@bp.route('/api/calendar/users/<username>', methods=['DELETE'])
+def delete_calendar_user(username):
+    """Delete calendar user."""
+    try:
+        from app import calendar_manager
+        result = calendar_manager.delete_calendar_user(username)
+        return jsonify({"deleted": result})
+    except Exception as e:
+        logger.error(f"Error deleting calendar user: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@bp.route('/api/calendar/calendars', methods=['POST'])
+def create_calendar():
+    """Create calendar."""
+    try:
+        from app import calendar_manager
+        data = request.get_json(silent=True)
+        if data is None:
+            return jsonify({"error": "No data provided"}), 400
+        username = data.get('username')
+        calendar_name = data.get('name') or data.get('calendar_name')
+        if not username or not calendar_name:
+            return jsonify({"error": "Missing required fields: username, name"}), 400
+        result = calendar_manager.create_calendar(
+            username,
+            calendar_name,
+            description=data.get('description', ''),
+            color=data.get('color', '#4285f4'),
+        )
+        return jsonify({"created": result})
+    except Exception as e:
+        logger.error(f"Error creating calendar: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@bp.route('/api/calendar/events', methods=['POST'])
+def add_calendar_event():
+    try:
+        from app import calendar_manager
+        data = request.get_json(silent=True)
+        if data is None:
+            return jsonify({"error": "No data provided"}), 400
+        username = data.get('username')
+        calendar_name = data.get('calendar_name') or data.get('calendar')
+        if not username or not calendar_name:
+            return jsonify({"error": "Missing required fields: username, calendar_name"}), 400
+        event_data = {k: v for k, v in data.items() if k not in ('username', 'calendar_name', 'calendar')}
+        result = calendar_manager.add_event(username, calendar_name, event_data)
+        return jsonify({"created": result})
+    except Exception as e:
+        logger.error(f"Error adding calendar event: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@bp.route('/api/calendar/events/<username>/<calendar_name>', 
methods=['GET']) +def get_calendar_events(username, calendar_name): + """Get calendar events.""" + try: + from app import calendar_manager + params = request.args.to_dict() + result = calendar_manager.get_events(username, calendar_name, params) + return jsonify(result) + except Exception as e: + logger.error(f"Error getting calendar events: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/calendar/status', methods=['GET']) +def get_calendar_status(): + """Get calendar service status.""" + try: + from app import calendar_manager + status = calendar_manager.get_status() + return jsonify(status) + except Exception as e: + logger.error(f"Error getting calendar status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/calendar/connectivity', methods=['GET']) +def test_calendar_connectivity(): + """Test calendar connectivity.""" + try: + from app import calendar_manager + result = calendar_manager.test_connectivity() + return jsonify(result) + except Exception as e: + logger.error(f"Error testing calendar connectivity: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/cells.py b/api/routes/cells.py new file mode 100644 index 0000000..79c8add --- /dev/null +++ b/api/routes/cells.py @@ -0,0 +1,126 @@ +import logging +import os +from flask import Blueprint, request, jsonify +from cell_link_manager import VALID_SERVICES +logger = logging.getLogger('picell') +bp = Blueprint('cells', __name__) + +@bp.route('/api/cells/invite', methods=['GET']) +def get_cell_invite(): + try: + from app import cell_link_manager, config_manager + identity = config_manager.configs.get('_identity', {}) + cell_name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) + domain = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) + return jsonify(cell_link_manager.generate_invite(cell_name, domain)) + except Exception as e: + logger.error(f"Error generating cell invite: {e}") + return jsonify({'error': str(e)}), 500 + 
+@bp.route('/api/cells/services', methods=['GET'])
+def list_shareable_services():
+    """Return the list of services that can be shared between cells."""
+    try:
+        from firewall_manager import SERVICE_IPS
+        return jsonify({'services': list(SERVICE_IPS.keys())})
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells', methods=['GET'])
+def list_cell_connections():
+    try:
+        from app import cell_link_manager
+        return jsonify(cell_link_manager.list_connections())
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells', methods=['POST'])
+def add_cell_connection():
+    try:
+        from app import cell_link_manager
+        data = request.get_json(silent=True)
+        if not data:
+            return jsonify({'error': 'No data provided'}), 400
+        for field in ('cell_name', 'public_key', 'vpn_subnet', 'dns_ip', 'domain'):
+            if field not in data:
+                return jsonify({'error': f'Missing field: {field}'}), 400
+        inbound_services = data.get('inbound_services', [])
+        link = cell_link_manager.add_connection(data, inbound_services=inbound_services)
+        return jsonify({'message': f"Connected to cell '{data['cell_name']}'", 'link': link}), 201
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 400
+    except RuntimeError as e:
+        return jsonify({'error': str(e)}), 400
+    except Exception as e:
+        logger.error(f"Error adding cell connection: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells/<cell_name>', methods=['DELETE'])
+def remove_cell_connection(cell_name):
+    try:
+        from app import cell_link_manager
+        cell_link_manager.remove_connection(cell_name)
+        return jsonify({'message': f"Cell '{cell_name}' disconnected"})
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 404
+    except Exception as e:
+        logger.error(f"Error removing cell connection: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells/<cell_name>/status', methods=['GET'])
+def get_cell_connection_status(cell_name):
+    try:
+        from app import
cell_link_manager
+        return jsonify(cell_link_manager.get_connection_status(cell_name))
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 404
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells/<cell_name>/permissions', methods=['GET'])
+def get_cell_permissions(cell_name):
+    try:
+        from app import cell_link_manager
+        perms = cell_link_manager.get_permissions(cell_name)
+        return jsonify(perms)
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 404
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/cells/<cell_name>/permissions', methods=['PUT'])
+def update_cell_permissions(cell_name):
+    try:
+        from app import cell_link_manager, firewall_manager, peer_registry
+        from app import COREFILE_PATH
+        data = request.get_json(silent=True)
+        if not data:
+            return jsonify({'error': 'No data provided'}), 400
+
+        # Validate service names in inbound/outbound
+        for direction in ('inbound', 'outbound'):
+            for service in data.get(direction, {}):
+                if service not in VALID_SERVICES:
+                    return jsonify({'error': f'Unknown service: {service!r}'}), 400
+
+        inbound = data.get('inbound', {})
+        outbound = data.get('outbound', {})
+        link = cell_link_manager.update_permissions(cell_name, inbound, outbound)
+
+        # Regenerate Corefile so outbound DNS changes take effect
+        try:
+            from app import config_manager
+            domain = config_manager.configs.get('_identity', {}).get('domain', 'cell')
+            peers = peer_registry.list_peers()
+            cell_links = cell_link_manager.list_connections()
+            firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, domain,
+                                                 cell_links=cell_links)
+        except Exception as e:
+            logger.warning(f"DNS regen after permission update failed (non-fatal): {e}")
+
+        return jsonify({'message': f"Permissions updated for '{cell_name}'", 'link': link})
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 404
+    except Exception as e:
+        logger.error(f"Error updating cell permissions: {e}")
+        return
jsonify({'error': str(e)}), 500 diff --git a/api/routes/config.py b/api/routes/config.py new file mode 100644 index 0000000..8732e3b --- /dev/null +++ b/api/routes/config.py @@ -0,0 +1,673 @@ +import io +import os +import re +import copy +import json +import ipaddress +import zipfile +import shutil +import logging +import threading +from datetime import datetime +from flask import Blueprint, request, jsonify, send_file, current_app +logger = logging.getLogger('picell') +bp = Blueprint('config', __name__) + + +# --------------------------------------------------------------------------- +# Pending-restart helpers +# --------------------------------------------------------------------------- + +def _collect_service_ports(configs: dict) -> dict: + """Extract current port values from service configs for .env generation.""" + from app import config_manager as _cm + ports = {} + net = configs.get('network', {}) + wg = configs.get('wireguard', {}) + email = configs.get('email', {}) + cal = configs.get('calendar', {}) + files = configs.get('files', {}) + identity = configs.get('_identity', {}) + + if 'dns_port' in net: ports['dns_port'] = net['dns_port'] + if 'port' in wg: ports['wg_port'] = wg['port'] + elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port'] + if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port'] + if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port'] + if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port'] + if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port'] + if 'port' in cal: ports['radicale_port'] = cal['port'] + if 'port' in files: ports['webdav_port'] = files['port'] + if 'manager_port' in files: ports['filegator_port'] = files['manager_port'] + return ports + + +def _dedup_changes(existing: list, new: list) -> list: + """Merge change lists, keeping only the latest entry per config key.""" + def key_of(msg: str) -> str: + if ' changed' 
in msg: + return msg.split(' changed')[0].strip() + if ':' in msg: + return msg.split(':')[0].strip() + return msg + merged = {key_of(c): c for c in existing} + merged.update({key_of(c): c for c in new}) + return list(merged.values()) + + +def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False, + pre_change_snapshot: dict = None): + """Record that specific containers need to be restarted to apply configuration.""" + from app import config_manager + existing = config_manager.configs.get('_pending_restart', {}) + existing_changes = existing.get('changes', []) if existing.get('needs_restart') else [] + existing_containers = existing.get('containers', []) if existing.get('needs_restart') else [] + + if not existing.get('needs_restart'): + snapshot = pre_change_snapshot or {} + else: + snapshot = existing.get('_snapshot', {}) + + if containers is None or '*' in (containers or []) or existing_containers == ['*']: + new_containers = ['*'] + else: + new_containers = list(set(existing_containers) | set(containers)) + + config_manager.configs['_pending_restart'] = { + 'needs_restart': True, + 'changed_at': datetime.utcnow().isoformat(), + 'changes': _dedup_changes(existing_changes, changes), + 'containers': new_containers, + 'network_recreate': network_recreate or existing.get('network_recreate', False), + '_snapshot': snapshot, + } + config_manager._save_all_configs() + + +def _clear_pending_restart(): + from app import config_manager + config_manager.configs['_pending_restart'] = { + 'needs_restart': False, 'changes': [], 'containers': [], 'network_recreate': False + } + config_manager._save_all_configs() + + +# --------------------------------------------------------------------------- +# Config routes +# --------------------------------------------------------------------------- + +@bp.route('/api/config', methods=['GET']) +def get_config(): + try: + from app import config_manager + import ip_utils as _ip_utils_cfg + 
service_configs = config_manager.get_all_configs() + identity = service_configs.pop('_identity', {}) + config = { + 'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')), + 'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')), + 'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')), + 'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))), + } + _ips = _ip_utils_cfg.get_service_ips(config['ip_range']) + config['service_ips'] = { + 'dns': _ips['dns'], + 'vip_mail': _ips['vip_mail'], + 'vip_calendar': _ips['vip_calendar'], + 'vip_files': _ips['vip_files'], + 'vip_webdav': _ips['vip_webdav'], + } + config['service_configs'] = service_configs + return jsonify(config) + except Exception as e: + logger.error(f"Error getting config: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/config', methods=['PUT']) +def update_config(): + try: + from app import (config_manager, network_manager, wireguard_manager, email_manager, + calendar_manager, file_manager, routing_manager, + peer_registry, firewall_manager, service_bus, EventType, detect_conflicts) + import ip_utils + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + + identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'} + identity_updates = {k: v for k, v in data.items() if k in identity_keys} + + _CELL_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$') + _DOMAIN_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$') + + if 'cell_name' in identity_updates: + v = str(identity_updates['cell_name']) + if not v: + return jsonify({'error': 'cell_name cannot be empty'}), 400 + if len(v) > 255: + return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400 + if not _CELL_NAME_RE.match(v): + return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400 + + if 'domain' in 
identity_updates: + v = str(identity_updates['domain']) + if not v: + return jsonify({'error': 'domain cannot be empty'}), 400 + if len(v) > 255: + return jsonify({'error': 'domain must be 255 characters or fewer'}), 400 + if not _DOMAIN_RE.match(v): + return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400 + + if 'ip_range' in identity_updates: + _rfc1918 = [ + ipaddress.ip_network('10.0.0.0/8'), + ipaddress.ip_network('172.16.0.0/12'), + ipaddress.ip_network('192.168.0.0/16'), + ] + try: + _raw = str(identity_updates['ip_range']) + if '/' not in _raw: + return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400 + _net = ipaddress.ip_network(_raw, strict=False) + if not any(_net.subnet_of(r) for r in _rfc1918): + return jsonify({'error': ( + 'ip_range must be within an RFC-1918 private range ' + '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)' + )}), 400 + except ValueError as _e: + return jsonify({'error': f'Invalid ip_range: {_e}'}), 400 + + _port_fields = { + 'network': ['dns_port'], + 'wireguard': ['port'], + 'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'], + 'calendar': ['port'], + 'files': ['port', 'manager_port'], + } + for _svc, _fields in _port_fields.items(): + if _svc not in data: + continue + _svc_data = data[_svc] + if not isinstance(_svc_data, dict): + continue + for _f in _fields: + if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '': + try: + _p = int(_svc_data[_f]) + if not (1 <= _p <= 65535): + raise ValueError() + except (ValueError, TypeError): + return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400 + + _conflicts = detect_conflicts(config_manager.configs, data) + if _conflicts: + _msgs = [ + f"port {_c['port']} is used by {', '.join(f'{_s}.{_f}' for _s, _f in _c['conflicts'])}" + for _c in _conflicts + ] + return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409 + + if 'wireguard' in data and 
isinstance(data['wireguard'], dict): + _addr = data['wireguard'].get('address') + if _addr: + if '/' not in str(_addr): + return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400 + try: + ipaddress.ip_interface(_addr) + except ValueError as _e: + return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400 + + old_identity = dict(config_manager.configs.get('_identity', {})) + old_svc_configs = { + svc: dict(config_manager.configs.get(svc, {})) + for svc in data if svc in config_manager.service_schemas + } + _pre_change_snapshot = {k: copy.deepcopy(v) for k, v in config_manager.configs.items() + if not k.startswith('_')} + _pre_change_snapshot['_identity'] = copy.deepcopy(config_manager.configs.get('_identity', {})) + + if identity_updates: + stored = config_manager.configs.get('_identity', {}) + stored.update(identity_updates) + config_manager.configs['_identity'] = stored + config_manager._save_all_configs() + + _svc_managers = { + 'network': network_manager, + 'wireguard': wireguard_manager, + 'email': email_manager, + 'calendar': calendar_manager, + 'files': file_manager, + 'routing': routing_manager, + 'vault': current_app.vault_manager, + } + + all_restarted = [] + all_warnings = [] + + for service, config in data.items(): + if service in config_manager.service_schemas: + config_manager.update_service_config(service, config) + mgr = _svc_managers.get(service) + if mgr: + mgr.update_config(config) + result = mgr.apply_config(config) + all_restarted.extend(result.get('restarted', [])) + all_warnings.extend(result.get('warnings', [])) + service_bus.publish_event(EventType.CONFIG_CHANGED, service, { + 'service': service, 'config': config + }) + if service == 'wireguard' and ('port' in config or 'address' in config): + for p in peer_registry.list_peers(): + peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True}) + n = len(peer_registry.list_peers()) + if n: + 
all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config') + if 'port' in config: + _id = config_manager.configs.get('_identity', {}) + _id['wireguard_port'] = config['port'] + config_manager.configs['_identity'] = _id + config_manager._save_all_configs() + + if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''): + domain = identity_updates['domain'] + net_result = network_manager.apply_domain(domain, reload=False) + all_warnings.extend(net_result.get('warnings', [])) + _cur_id = config_manager.configs.get('_identity', {}) + ip_utils.write_caddyfile( + _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')), + _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')), + domain, '/app/config-caddy/Caddyfile' + ) + _set_pending_restart( + [f'domain changed to {domain}'], + ['dns', 'caddy'], + pre_change_snapshot=_pre_change_snapshot, + ) + + if identity_updates.get('cell_name'): + old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) + new_name = identity_updates['cell_name'] + if old_name != new_name: + cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False) + all_warnings.extend(cn_result.get('warnings', [])) + _cur_id2 = config_manager.configs.get('_identity', {}) + ip_utils.write_caddyfile( + _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')), + new_name, + identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')), + '/app/config-caddy/Caddyfile' + ) + _set_pending_restart( + [f'cell_name changed to {new_name}'], + ['dns'], + pre_change_snapshot=_pre_change_snapshot, + ) + + if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''): + new_range = identity_updates['ip_range'] + cur_identity = config_manager.configs.get('_identity', {}) + cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 
'mycell')) + cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) + ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain) + all_restarted.extend(ip_result.get('restarted', [])) + all_warnings.extend(ip_result.get('warnings', [])) + firewall_manager.update_service_ips(new_range) + firewall_manager.ensure_caddy_virtual_ips() + env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose') + ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs)) + ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, '/app/config-caddy/Caddyfile') + _set_pending_restart( + [f'ip_range changed to {new_range} — network will be recreated'], + ['*'], network_recreate=True, + pre_change_snapshot=_pre_change_snapshot, + ) + + _PORT_CHANGE_MAP = { + ('network', 'dns_port'): ('dns_port', ['dns']), + ('wireguard','port'): ('wg_port', ['wireguard']), + ('email', 'smtp_port'): ('mail_smtp_port', ['mail']), + ('email', 'submission_port'): ('mail_submission_port', ['mail']), + ('email', 'imap_port'): ('mail_imap_port', ['mail']), + ('email', 'webmail_port'): ('rainloop_port', ['rainloop']), + ('calendar', 'port'): ('radicale_port', ['radicale']), + ('files', 'port'): ('webdav_port', ['webdav']), + ('files', 'manager_port'): ('filegator_port', ['filegator']), + } + + port_changed_containers = set() + port_change_messages = [] + + for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items(): + if svc_key in data and field in data[svc_key]: + default_val = ip_utils.PORT_DEFAULTS.get(_env_key) + old_val = old_svc_configs.get(svc_key, {}).get(field, default_val) + new_val = data[svc_key][field] + if old_val != new_val: + port_changed_containers.update(containers) + port_change_messages.append(f'{svc_key} {field}: {old_val} → {new_val}') + + if 'wireguard_port' in identity_updates: + old_wg = old_identity.get('wireguard_port', ip_utils.PORT_DEFAULTS.get('wg_port', 51820)) + new_wg = 
identity_updates['wireguard_port'] + if old_wg != new_wg: + _wg_svc = config_manager.configs.get('wireguard', {}) + _wg_svc['port'] = new_wg + config_manager.update_service_config('wireguard', _wg_svc) + wireguard_manager.apply_config({'port': new_wg}) + port_changed_containers.add('wireguard') + port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}') + + if port_changed_containers: + env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose') + _ip_range = config_manager.configs.get('_identity', {}).get( + 'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16') + ) + ip_utils.write_env_file(_ip_range, env_file, _collect_service_ports(config_manager.configs)) + _set_pending_restart(port_change_messages, list(port_changed_containers), + pre_change_snapshot=_pre_change_snapshot) + + logger.info(f"Updated config, restarted: {all_restarted}") + return jsonify({ + "message": "Configuration updated and applied", + "restarted": all_restarted, + "warnings": all_warnings, + }) + except Exception as e: + logger.error(f"Error updating config: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/config/pending', methods=['GET']) +def get_pending_config(): + from app import config_manager + pending = config_manager.configs.get('_pending_restart', {}) + return jsonify({ + 'needs_restart': pending.get('needs_restart', False), + 'applying': pending.get('applying', False), + 'changed_at': pending.get('changed_at'), + 'changes': pending.get('changes', []), + 'containers': pending.get('containers', ['*']), + }) + + +@bp.route('/api/config/pending', methods=['DELETE']) +def cancel_pending_config(): + from app import config_manager, network_manager + import ip_utils as _ip_revert + pending = config_manager.configs.get('_pending_restart', {}) + snapshot = pending.get('_snapshot', {}) + if snapshot: + cur_identity = dict(config_manager.configs.get('_identity', {})) + old_identity = snapshot.get('_identity', {}) + + for k, v in snapshot.items(): + 
config_manager.configs[k] = v + + _id = config_manager.configs.get('_identity', {}) + _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) + + cur_domain = cur_identity.get('domain', '') + old_domain = old_identity.get('domain', '') + if cur_domain and old_domain and cur_domain != old_domain: + network_manager.apply_domain(old_domain, reload=False) + + cur_cell_name = cur_identity.get('cell_name', '') + old_cell_name = old_identity.get('cell_name', '') + if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name: + network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False) + + _ip_revert.write_caddyfile( + _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')), + _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')), + _dom, '/app/config-caddy/Caddyfile' + ) + + _clear_pending_restart() + return jsonify({'message': 'Pending changes discarded'}) + + +@bp.route('/api/config/apply', methods=['POST']) +def apply_pending_config(): + try: + from app import config_manager + pending = config_manager.configs.get('_pending_restart', {}) + if not pending.get('needs_restart'): + return jsonify({'message': 'No pending changes to apply'}) + + project_dir = '/home/roof/pic' + api_image = 'pic_api:latest' + data_host_path = '/home/roof/pic/data/api' + try: + import docker as _docker_sdk + _client = _docker_sdk.from_env() + _self = _client.containers.get('cell-api') + project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir) + tags = _self.image.tags + if tags: + api_image = tags[0] + for _m in _self.attrs.get('Mounts', []): + if _m.get('Destination') == '/app/data': + data_host_path = _m.get('Source', data_host_path) + break + except Exception: + pass + + containers = pending.get('containers', ['*']) + needs_network_recreate = pending.get('network_recreate', False) + + host_env = os.path.join(project_dir, '.env') + host_compose = os.path.join(project_dir, 'docker-compose.yml') + + if '*' in 
containers: + config_manager.configs['_pending_restart']['applying'] = True + config_manager._save_all_configs() + + import base64 as _b64 + _clear_py = ( + "import json,os; p='/app/data/cell_config.json';" + "f=open(p); d=json.load(f); f.close();" + "d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};" + "tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)" + ) + _b64_cmd = _b64.b64encode(_clear_py.encode()).decode() + clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\"" + + if needs_network_recreate: + helper_script = ( + f'sleep 2' + f' && docker compose --project-directory {project_dir}' + f' -f {host_compose} --env-file {host_env} down' + f' && {clear_flag_cmd}' + f' && docker compose --project-directory {project_dir}' + f' -f {host_compose} --env-file {host_env} up -d' + ) + else: + helper_script = ( + f'sleep 2' + f' && {clear_flag_cmd}' + f' && docker compose --project-directory {project_dir}' + f' -f {host_compose} --env-file {host_env} up -d' + ) + + def _do_apply(): + import subprocess as _subprocess + _subprocess.Popen( + ['docker', 'run', '--rm', + '-v', '/var/run/docker.sock:/var/run/docker.sock', + '-v', f'{project_dir}:{project_dir}', + '-v', f'{data_host_path}:/app/data', + '--entrypoint', 'sh', + api_image, + '-c', helper_script], + close_fds=True, + stdout=_subprocess.DEVNULL, + stderr=_subprocess.DEVNULL, + ) + logger.info( + 'spawned helper container for all-services restart' + + (' (network_recreate)' if needs_network_recreate else '') + ) + else: + def _do_apply(): + import time as _time + import subprocess as _subprocess + _time.sleep(0.3) + result = _subprocess.run( + ['docker', 'compose', + '--project-directory', project_dir, + '-f', '/app/docker-compose.yml', + '--env-file', '/app/.env.compose', + 'up', '-d', '--no-deps', '--force-recreate'] + containers, + capture_output=True, text=True, timeout=120, + ) + if 
result.returncode != 0:
+                logger.error(f"docker compose up failed: {result.stderr.strip()}")
+            else:
+                logger.info(f'docker compose up completed for: {containers}')
+            _clear_pending_restart()
+
+        threading.Thread(target=_do_apply, daemon=False).start()
+        return jsonify({
+            'message': 'Applying configuration — containers are restarting',
+            'restart_in_progress': True,
+        })
+    except Exception as e:
+        logger.error(f"Error applying config: {e}")
+        return jsonify({'error': str(e)}), 500
+
+
+@bp.route('/api/config/backup', methods=['POST'])
+def create_config_backup():
+    try:
+        from app import config_manager, service_bus, EventType
+        backup_id = config_manager.backup_config()
+        service_bus.publish_event(EventType.BACKUP_CREATED, 'api', {
+            'backup_id': backup_id,
+            'timestamp': datetime.utcnow().isoformat()
+        })
+        return jsonify({"backup_id": backup_id})
+    except Exception as e:
+        logger.error(f"Error creating backup: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+@bp.route('/api/config/backups', methods=['GET'])
+def list_config_backups():
+    try:
+        from app import config_manager
+        return jsonify(config_manager.list_backups())
+    except Exception as e:
+        logger.error(f"Error listing backups: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+@bp.route('/api/config/restore/<backup_id>', methods=['POST'])
+def restore_config(backup_id):
+    try:
+        from app import config_manager, service_bus, EventType
+        data = request.get_json(silent=True) or {}
+        success = config_manager.restore_config(backup_id, services=data.get('services'))
+        if success:
+            service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
+                'backup_id': backup_id,
+                'timestamp': datetime.utcnow().isoformat()
+            })
+            return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
+        return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
+    except Exception as e:
+        logger.error(f"Error restoring backup: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+@bp.route('/api/config/export', methods=['GET'])
+def export_config():
+    try:
+        from app import config_manager
+        format = request.args.get('format', 'json')
+        config_data = config_manager.export_config(format)
+        return jsonify({"config": config_data, "format": format})
+    except Exception as e:
+        logger.error(f"Error exporting config: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+@bp.route('/api/config/import', methods=['POST'])
+def import_config():
+    try:
+        from app import config_manager
+        data = request.get_json(silent=True)
+        if data is None:
+            return jsonify({"error": "No data provided"}), 400
+        success = config_manager.import_config(data.get('config'), data.get('format', 'json'))
+        if success:
+            return jsonify({"message": "Configuration imported successfully"})
+        return jsonify({"error": "Failed to import configuration"}), 500
+    except Exception as e:
+        logger.error(f"Error importing config: {e}")
+        return jsonify({"error": str(e)}), 500
+
+
+@bp.route('/api/config/backups/<backup_id>/download', methods=['GET'])
+def download_backup(backup_id):
+    try:
+        from app import config_manager
+        backup_path = config_manager.backup_dir / backup_id
+        if not backup_path.exists():
+            return jsonify({'error': f'Backup {backup_id} not found'}), 404
+        buf = io.BytesIO()
+        with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
+            for f in backup_path.rglob('*'):
+                if f.is_file():
+                    zf.write(f, f.relative_to(backup_path))
+        buf.seek(0)
+        return send_file(buf, mimetype='application/zip',
+                         as_attachment=True,
+                         download_name=f'{backup_id}.zip')
+    except Exception as e:
+        logger.error(f"Error downloading backup: {e}")
+        return jsonify({'error': str(e)}), 500
+
+
+@bp.route('/api/config/backup/upload', methods=['POST'])
+def upload_backup():
+    try:
+        from app import config_manager
+        if 'file' not in request.files:
+            return jsonify({'error': 'No file provided'}), 400
+        f = request.files['file']
+        filename = f.filename or ''
+        if filename.endswith('.zip'):
+            backup_id = filename[:-4]
+        else:
+            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
+        backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
+        backup_path = config_manager.backup_dir / backup_id
+        backup_path.mkdir(parents=True, exist_ok=True)
+        try:
+            with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
+                zf.extractall(backup_path)
+        except zipfile.BadZipFile:
+            shutil.rmtree(backup_path, ignore_errors=True)
+            return jsonify({'error': 'Invalid zip file'}), 400
+        if not (backup_path / 'manifest.json').exists():
+            shutil.rmtree(backup_path, ignore_errors=True)
+            return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
+        return jsonify({'backup_id': backup_id})
+    except Exception as e:
+        logger.error(f"Error uploading backup: {e}")
+        return jsonify({'error': str(e)}), 500
+
+
+@bp.route('/api/config/backups/<backup_id>', methods=['DELETE'])
+def delete_config_backup(backup_id):
+    try:
+        from app import config_manager
+        success = config_manager.delete_backup(backup_id)
+        if success:
+            return jsonify({"message": f"Backup {backup_id} deleted"})
+        return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
+    except Exception as e:
+        logger.error(f"Error deleting backup: {e}")
+        return jsonify({"error": str(e)}), 500
diff --git a/api/routes/containers.py b/api/routes/containers.py
new file mode 100644
index 0000000..4393c39
--- /dev/null
+++ b/api/routes/containers.py
@@ -0,0 +1,195 @@
+import logging
+import os
+from flask import Blueprint, request, jsonify, current_app
+logger = logging.getLogger('picell')
+bp = Blueprint('containers', __name__)
+
+@bp.route('/api/containers', methods=['GET'])
+def list_containers():
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        return jsonify(container_manager.list_containers())
+    except Exception as e:
+        logger.error(f"Error listing containers: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>/start', methods=['POST'])
+def start_container(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        return jsonify({'started': container_manager.start_container(name)})
+    except Exception as e:
+        logger.error(f"Error starting container {name}: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>/stop', methods=['POST'])
+def stop_container(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        return jsonify({'stopped': container_manager.stop_container(name)})
+    except Exception as e:
+        logger.error(f"Error stopping container {name}: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>/restart', methods=['POST'])
+def restart_container(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        return jsonify({'restarted': container_manager.restart_container(name)})
+    except Exception as e:
+        logger.error(f"Error restarting container {name}: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>/logs', methods=['GET'])
+def get_container_logs(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        tail = request.args.get('tail', default=100, type=int)
+        return jsonify({'logs': container_manager.get_container_logs(name, tail=tail)})
+    except Exception as e:
+        logger.error(f"Error getting logs for container {name}: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>/stats', methods=['GET'])
+def get_container_stats(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        return
jsonify(container_manager.get_container_stats(name))
+    except Exception as e:
+        logger.error(f"Error getting stats for container {name}: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers', methods=['POST'])
+def create_container():
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        data = request.get_json(silent=True)
+        if not data or 'image' not in data:
+            return jsonify({'error': 'Missing image parameter'}), 400
+        name = data.get('name', '')
+        env = data.get('env', {})
+        secrets = data.get('secrets', [])
+        if secrets:
+            for secret_name in secrets:
+                secret_value = current_app.vault_manager.get_secret(secret_name)
+                if secret_value is not None:
+                    env[secret_name] = secret_value
+        volumes = data.get('volumes', {})
+        if volumes:
+            allowed_prefixes = ('/home/roof/pic/data/', '/home/roof/pic/config/', '/tmp/')
+            for host_path in volumes.keys():
+                resolved = os.path.realpath(str(host_path))
+                if not any(resolved.startswith(p) for p in allowed_prefixes):
+                    return jsonify({'error': f'Volume mount not allowed: {host_path}'}), 403
+        result = container_manager.create_container(
+            image=data['image'],
+            name=name,
+            env=env,
+            volumes=volumes,
+            command=data.get('command', ''),
+            ports=data.get('ports', {})
+        )
+        if 'error' in result:
+            return jsonify(result), 500
+        return jsonify(result)
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/containers/<name>', methods=['DELETE'])
+def remove_container(name):
+    try:
+        from app import container_manager, is_local_request
+        if not is_local_request():
+            return jsonify({'error': 'Access denied'}), 403
+        force = request.args.get('force', default=False, type=bool)
+        return jsonify({'removed': container_manager.remove_container(name, force=force)})
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+@bp.route('/api/images', methods=['GET'])
+def list_images():
+    try:
+        from
app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + return jsonify(container_manager.list_images()) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/images/pull', methods=['POST']) +def pull_image(): + try: + from app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + data = request.get_json(silent=True) + if not data or 'image' not in data: + return jsonify({'error': 'Missing image parameter'}), 400 + result = container_manager.pull_image(data['image']) + if 'error' in result: + return jsonify(result), 500 + return jsonify(result) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/images/', methods=['DELETE']) +def remove_image(image): + try: + from app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + force = request.args.get('force', default=False, type=bool) + return jsonify({'removed': container_manager.remove_image(image, force=force)}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/volumes', methods=['GET']) +def list_volumes(): + try: + from app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + return jsonify(container_manager.list_volumes()) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/volumes', methods=['POST']) +def create_volume(): + try: + from app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + data = request.get_json(silent=True) + if not data or 'name' not in data: + return jsonify({'error': 'Missing name parameter'}), 400 + result = container_manager.create_volume(data['name']) + if 'error' in result: + return jsonify(result), 500 + 
return jsonify(result) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/volumes/', methods=['DELETE']) +def remove_volume(name): + try: + from app import container_manager, is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + force = request.args.get('force', default=False, type=bool) + return jsonify({'removed': container_manager.remove_volume(name, force=force)}) + except Exception as e: + return jsonify({'error': str(e)}), 500 diff --git a/api/routes/email.py b/api/routes/email.py new file mode 100644 index 0000000..48ce94c --- /dev/null +++ b/api/routes/email.py @@ -0,0 +1,92 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('email', __name__) + +@bp.route('/api/email/users', methods=['GET']) +def get_email_users(): + """Get email users.""" + try: + from app import email_manager + users = email_manager.get_users() + return jsonify(users) + except Exception as e: + logger.error(f"Error getting email users: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/users', methods=['POST']) +def create_email_user(): + """Create email user.""" + try: + from app import email_manager, _configured_domain + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + username = data.get('username') + domain = data.get('domain') or _configured_domain() + password = data.get('password') + if not username or not password: + return jsonify({"error": "Missing required fields: username, password"}), 400 + result = email_manager.create_email_user(username, domain, password) + return jsonify({"created": result}) + except Exception as e: + logger.error(f"Error creating email user: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/users/', methods=['DELETE']) +def delete_email_user(username): + """Delete email user.""" + try: + from app import 
email_manager, _configured_domain + domain = request.args.get('domain') or _configured_domain() + result = email_manager.delete_email_user(username, domain) + return jsonify({"deleted": result}) + except Exception as e: + logger.error(f"Error deleting email user: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/status', methods=['GET']) +def get_email_status(): + """Get email service status.""" + try: + from app import email_manager + status = email_manager.get_status() + return jsonify(status) + except Exception as e: + logger.error(f"Error getting email status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/connectivity', methods=['GET']) +def test_email_connectivity(): + """Test email connectivity.""" + try: + from app import email_manager + result = email_manager.test_connectivity() + return jsonify(result) + except Exception as e: + logger.error(f"Error testing email connectivity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/send', methods=['POST']) +def send_email(): + try: + from app import email_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + result = email_manager.send_email(data) + return jsonify(result) + except Exception as e: + logger.error(f"Error sending email: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/email/mailbox/', methods=['GET']) +def get_mailbox_info(username): + """Get mailbox information.""" + try: + from app import email_manager + result = email_manager.get_mailbox_info(username) + return jsonify(result) + except Exception as e: + logger.error(f"Error getting mailbox info: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/files.py b/api/routes/files.py new file mode 100644 index 0000000..a610f25 --- /dev/null +++ b/api/routes/files.py @@ -0,0 +1,159 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') 
+bp = Blueprint('files', __name__) + +@bp.route('/api/files/users', methods=['GET']) +def get_file_users(): + """Get file storage users.""" + try: + from app import file_manager + users = file_manager.get_users() + return jsonify(users) + except Exception as e: + logger.error(f"Error getting file users: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/users', methods=['POST']) +def create_file_user(): + """Create file storage user.""" + try: + from app import file_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + username = data.get('username') + password = data.get('password') + if not username or not password: + return jsonify({"error": "Missing required fields: username, password"}), 400 + result = file_manager.create_user(username, password) + return jsonify({"created": result}) + except Exception as e: + logger.error(f"Error creating file user: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/users/', methods=['DELETE']) +def delete_file_user(username): + """Delete file storage user.""" + try: + from app import file_manager + result = file_manager.delete_user(username) + return jsonify(result) + except Exception as e: + logger.error(f"Error deleting file user: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/folders', methods=['POST']) +def create_folder(): + """Create folder.""" + try: + from app import file_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + username = data.get('username') + folder_path = data.get('folder_path') or data.get('path') + if not username or not folder_path: + return jsonify({"error": "Missing required fields: username, folder_path"}), 400 + result = file_manager.create_folder(username, folder_path) + return jsonify({"created": result}) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as 
e: + logger.error(f"Error creating folder: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/folders//', methods=['DELETE']) +def delete_folder(username, folder_path): + """Delete folder.""" + try: + from app import file_manager + result = file_manager.delete_folder(username, folder_path) + return jsonify(result) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + logger.error(f"Error deleting folder: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/upload/', methods=['POST']) +def upload_file(username): + """Upload file.""" + try: + from app import file_manager + if 'file' not in request.files: + return jsonify({"error": "No file provided"}), 400 + + file = request.files['file'] + path = request.form.get('path', '') or file.filename or '' + file_data = file.read() + + result = file_manager.upload_file(username, path, file_data) + return jsonify({"uploaded": result}) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + logger.error(f"Error uploading file: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/download//', methods=['GET']) +def download_file(username, file_path): + """Download file.""" + try: + from app import file_manager + result = file_manager.download_file(username, file_path) + return jsonify(result) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + logger.error(f"Error downloading file: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/delete//', methods=['DELETE']) +def delete_file(username, file_path): + """Delete file.""" + try: + from app import file_manager + result = file_manager.delete_file(username, file_path) + return jsonify(result) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + logger.error(f"Error deleting file: {e}") + return jsonify({"error": str(e)}), 500 + 
+@bp.route('/api/files/list/', methods=['GET']) +def list_files(username): + """List files.""" + try: + from app import file_manager + folder = request.args.get('folder', '') + result = file_manager.list_files(username, folder) + return jsonify(result) + except ValueError as e: + return jsonify({"error": str(e)}), 400 + except Exception as e: + logger.error(f"Error listing files: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/status', methods=['GET']) +def get_file_status(): + """Get file service status.""" + try: + from app import file_manager + status = file_manager.get_status() + return jsonify(status) + except Exception as e: + logger.error(f"Error getting file status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/files/connectivity', methods=['GET']) +def test_file_connectivity(): + """Test file service connectivity.""" + try: + from app import file_manager + result = file_manager.test_connectivity() + return jsonify(result) + except Exception as e: + logger.error(f"Error testing file connectivity: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/network.py b/api/routes/network.py new file mode 100644 index 0000000..6a2c704 --- /dev/null +++ b/api/routes/network.py @@ -0,0 +1,109 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('network', __name__) + +@bp.route('/api/dns/records', methods=['GET']) +def get_dns_records(): + try: + from app import network_manager + return jsonify(network_manager.get_dns_records()) + except Exception as e: + logger.error(f"Error getting DNS records: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dns/records', methods=['POST']) +def add_dns_record(): + try: + from app import network_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + return jsonify(network_manager.add_dns_record(**data)) + except Exception as 
e: + logger.error(f"Error adding DNS record: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dns/records', methods=['DELETE']) +def remove_dns_record(): + try: + from app import network_manager + data = request.get_json(silent=True) + return jsonify(network_manager.remove_dns_record(**data)) + except Exception as e: + logger.error(f"Error removing DNS record: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dhcp/leases', methods=['GET']) +def get_dhcp_leases(): + try: + from app import network_manager + return jsonify(network_manager.get_dhcp_leases()) + except Exception as e: + logger.error(f"Error getting DHCP leases: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dhcp/reservations', methods=['POST']) +def add_dhcp_reservation(): + try: + from app import network_manager + data = request.get_json(silent=True) + if not data: + return jsonify({"error": "No data provided"}), 400 + for field in ('mac', 'ip'): + if field not in data: + return jsonify({"error": f"Missing required field: {field}"}), 400 + result = network_manager.add_dhcp_reservation(data['mac'], data['ip'], data.get('hostname', '')) + return jsonify({"success": result}) + except Exception as e: + logger.error(f"Error adding DHCP reservation: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dhcp/reservations', methods=['DELETE']) +def remove_dhcp_reservation(): + try: + from app import network_manager + data = request.get_json(silent=True) + if not data or 'mac' not in data: + return jsonify({"error": "Missing required field: mac"}), 400 + result = network_manager.remove_dhcp_reservation(data['mac']) + return jsonify({"success": result}) + except Exception as e: + logger.error(f"Error removing DHCP reservation: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/ntp/status', methods=['GET']) +def get_ntp_status(): + try: + from app import network_manager + return jsonify(network_manager.get_ntp_status()) + except 
Exception as e: + logger.error(f"Error getting NTP status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/network/info', methods=['GET']) +def get_network_info(): + try: + from app import network_manager + return jsonify(network_manager.get_network_info()) + except Exception as e: + logger.error(f"Error getting network info: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/dns/status', methods=['GET']) +def get_dns_status(): + try: + from app import network_manager + return jsonify(network_manager.get_dns_status()) + except Exception as e: + logger.error(f"Error getting DNS status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/network/test', methods=['POST']) +def test_network(): + try: + from app import network_manager + return jsonify(network_manager.test_connectivity()) + except Exception as e: + logger.error(f"Error testing network: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/peer_dashboard.py b/api/routes/peer_dashboard.py new file mode 100644 index 0000000..1a2e474 --- /dev/null +++ b/api/routes/peer_dashboard.py @@ -0,0 +1,115 @@ +import logging +from flask import Blueprint, jsonify, session +logger = logging.getLogger('picell') +bp = Blueprint('peer_dashboard', __name__) + +@bp.route('/api/peer/dashboard', methods=['GET']) +def peer_dashboard(): + try: + from app import peer_registry, wireguard_manager, _configured_domain + peer_name = session.get('peer_name') + peer = peer_registry.get_peer(peer_name) if peer_name else None + if not peer: + return jsonify({'error': 'Peer not found'}), 404 + + wg_stats = {'online': None, 'transfer_rx': 0, 'transfer_tx': 0, 'last_handshake': None} + public_key = peer.get('public_key') + if public_key: + try: + wg_stats = wireguard_manager.get_peer_status(public_key) + except Exception: + pass + + peer_ip = peer.get('ip', '') + allowed_ips = f"{peer_ip.split('/')[0]}/32" if peer_ip else '' + domain = _configured_domain() + _svc_url_map = { + 
'calendar': f'http://calendar.{domain}', + 'files': f'http://files.{domain}', + 'mail': f'http://mail.{domain}', + 'webdav': f'http://webdav.{domain}', + } + service_urls = { + svc: _svc_url_map[svc] + for svc in peer.get('service_access', []) + if svc in _svc_url_map + } + return jsonify({ + 'name': peer_name, + 'ip': peer_ip, + 'service_access': peer.get('service_access', []), + 'service_urls': service_urls, + 'online': wg_stats.get('online'), + 'transfer_rx': wg_stats.get('transfer_rx', 0), + 'transfer_tx': wg_stats.get('transfer_tx', 0), + 'last_handshake': wg_stats.get('last_handshake'), + 'allowed_ips': peer.get('allowed_ips', allowed_ips), + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 + + +@bp.route('/api/peer/services', methods=['GET']) +def peer_services(): + try: + from app import peer_registry, wireguard_manager, config_manager, _configured_domain, _resolve_peer_dns + peer_name = session.get('peer_name') + peer = peer_registry.get_peer(peer_name) if peer_name else None + if not peer: + return jsonify({'error': 'Peer not found'}), 404 + + domain = _configured_domain() + peer_ip = peer.get('ip', '') + + server_public_key = '' + wg_port = 51820 + server_endpoint = '' + try: + server_public_key = wireguard_manager.get_keys().get('public_key', '') + wg_port = config_manager.configs.get('_identity', {}).get('wireguard_port', 51820) + srv = wireguard_manager.get_server_config() + server_endpoint = srv.get('endpoint') or '' + except Exception: + pass + + wg_config = '' + peer_private_key = peer.get('private_key', '') + if peer_private_key: + try: + internet_access = peer.get('internet_access', True) + allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips() + wg_config = wireguard_manager.get_peer_config( + peer_name=peer_name, + peer_ip=peer_ip, + peer_private_key=peer_private_key, + server_endpoint=server_endpoint, + allowed_ips=allowed_ips, + ) + except Exception: + pass + + 
return jsonify({ + 'username': peer_name, + 'wireguard': { + 'ip': peer_ip, + 'server_public_key': server_public_key, + 'endpoint_port': wg_port, + 'dns': _resolve_peer_dns(), + 'config': wg_config, + }, + 'email': { + 'address': f'{peer_name}@{domain}', + 'smtp': {'host': f'mail.{domain}', 'port': 587}, + 'imap': {'host': f'mail.{domain}', 'port': 993}, + }, + 'caldav': { + 'url': f'http://calendar.{domain}', + 'username': peer_name, + }, + 'files': { + 'url': f'http://files.{domain}', + 'username': peer_name, + }, + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 diff --git a/api/routes/peers.py b/api/routes/peers.py new file mode 100644 index 0000000..e714a02 --- /dev/null +++ b/api/routes/peers.py @@ -0,0 +1,299 @@ +import logging +import ipaddress +from flask import Blueprint, request, jsonify, session +logger = logging.getLogger('picell') +bp = Blueprint('peers', __name__) + + +def _next_peer_ip() -> str: + """Auto-assign the next free host address from the configured VPN subnet.""" + from app import wireguard_manager, peer_registry + server_addr = wireguard_manager._get_configured_address() + network = ipaddress.ip_network(server_addr, strict=False) + server_ip = str(ipaddress.ip_interface(server_addr).ip) + used = {p.get('ip', '').split('/')[0] for p in peer_registry.list_peers()} + for host in network.hosts(): + ip = str(host) + if ip == server_ip: + continue + if ip not in used: + return ip + raise ValueError(f'No free IPs left in {network}') + + +@bp.route('/api/peers', methods=['GET']) +def get_peers(): + try: + from app import peer_registry + return jsonify(peer_registry.list_peers()) + except Exception as e: + logger.error(f"Error getting peers: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers', methods=['POST']) +def add_peer(): + """Add a peer and auto-provision auth/email/calendar/files accounts.""" + try: + from app import (peer_registry, wireguard_manager, firewall_manager, + email_manager, 
calendar_manager, file_manager, auth_manager, + cell_link_manager, _configured_domain, COREFILE_PATH) + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + + for field in ('name', 'public_key'): + if field not in data: + return jsonify({"error": f"Missing required field: {field}"}), 400 + + password = data.get('password') or '' + if not password: + return jsonify({"error": "Missing required field: password"}), 400 + if len(password) < 10: + return jsonify({"error": "password must be at least 10 characters"}), 400 + + try: + assigned_ip = data.get('ip') or _next_peer_ip() + except ValueError as e: + return jsonify({'error': str(e)}), 409 + + _valid_services = {'calendar', 'files', 'mail', 'webdav'} + service_access = data.get('service_access', list(_valid_services)) + if not isinstance(service_access, list) or not all(s in _valid_services for s in service_access): + return jsonify({"error": f"service_access must be a list of: {sorted(_valid_services)}"}), 400 + + peer_name = data['name'] + + if not auth_manager.create_user(peer_name, password, 'peer'): + return jsonify({"error": "Could not create auth account (duplicate name?)"}), 400 + + provisioned = ['auth'] + domain = _configured_domain() + for step_name, step_fn in [ + ('email', lambda: email_manager.create_email_user(peer_name, domain, password)), + ('calendar', lambda: calendar_manager.create_calendar_user(peer_name, password)), + ('files', lambda: file_manager.create_user(peer_name, password)), + ]: + try: + if step_fn(): + provisioned.append(step_name) + else: + logger.warning(f"Peer {peer_name}: {step_name} account creation returned False") + except Exception as e: + logger.warning(f"Peer {peer_name}: {step_name} account creation failed (non-fatal): {e}") + + peer_info = { + 'peer': peer_name, + 'ip': assigned_ip, + 'public_key': data['public_key'], + 'private_key': data.get('private_key'), + 'server_public_key': data.get('server_public_key'), + 
'server_endpoint': data.get('server_endpoint'), + 'allowed_ips': data.get('allowed_ips'), + 'persistent_keepalive': data.get('persistent_keepalive'), + 'description': data.get('description'), + 'internet_access': data.get('internet_access', True), + 'service_access': service_access, + 'peer_access': data.get('peer_access', True), + 'config_needs_reinstall': False, + } + + peer_added_to_registry = False + firewall_applied = False + try: + success = peer_registry.add_peer(peer_info) + if not success: + for svc in ('files', 'calendar', 'email', 'auth'): + try: + if svc == 'files': + file_manager.delete_user(peer_name) + elif svc == 'calendar': + calendar_manager.delete_calendar_user(peer_name) + elif svc == 'email': + email_manager.delete_email_user(peer_name, _configured_domain()) + elif svc == 'auth': + auth_manager.delete_user(peer_name) + except Exception: + pass + return jsonify({"error": f"Peer {peer_name} already exists"}), 400 + peer_added_to_registry = True + + firewall_manager.apply_peer_rules(peer_info['ip'], peer_info) + firewall_applied = True + + wg_allowed = f"{assigned_ip}/32" if '/' not in assigned_ip else assigned_ip + try: + wireguard_manager.add_peer(peer_name, data['public_key'], endpoint_ip='', allowed_ips=wg_allowed) + except Exception as wg_err: + logger.warning(f"Peer {peer_name}: WireGuard server config update failed (non-fatal): {wg_err}") + + firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), + cell_links=cell_link_manager.list_connections()) + return jsonify({"message": f"Peer {peer_name} added successfully", "ip": assigned_ip}), 201 + + except Exception as e: + if firewall_applied: + try: + firewall_manager.clear_peer_rules(peer_info['ip']) + except Exception: + pass + if peer_added_to_registry: + try: + peer_registry.remove_peer(peer_name) + except Exception: + pass + logger.error(f"Error adding peer {peer_name}: {e}") + return jsonify({'error': str(e)}), 500 + + except Exception as e: + 
logger.error(f"Error adding peer: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers/', methods=['PUT']) +def update_peer(peer_name): + try: + from app import peer_registry, firewall_manager, cell_link_manager, _configured_domain, COREFILE_PATH + data = request.get_json(silent=True) or {} + existing = peer_registry.get_peer(peer_name) + if not existing: + return jsonify({"error": "Peer not found"}), 404 + + config_changed = ( + ('internet_access' in data and data['internet_access'] != existing.get('internet_access', True)) or + ('ip' in data and data['ip'] != existing.get('ip')) or + ('persistent_keepalive' in data and data['persistent_keepalive'] != existing.get('persistent_keepalive')) + ) + + updates = {k: v for k, v in data.items()} + if config_changed: + updates['config_needs_reinstall'] = True + + success = peer_registry.update_peer(peer_name, updates) + if success: + updated_peer = peer_registry.get_peer(peer_name) + if updated_peer: + firewall_manager.apply_peer_rules(updated_peer['ip'], updated_peer) + firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), + cell_links=cell_link_manager.list_connections()) + return jsonify({"message": f"Peer {peer_name} updated", "config_changed": config_changed}) + return jsonify({"error": "Update failed"}), 500 + except Exception as e: + logger.error(f"Error updating peer {peer_name}: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers//clear-reinstall', methods=['POST']) +def clear_peer_reinstall(peer_name): + try: + from app import peer_registry + peer_registry.clear_reinstall_flag(peer_name) + return jsonify({"message": "Reinstall flag cleared"}) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers/', methods=['DELETE']) +def remove_peer(peer_name): + try: + from app import (peer_registry, wireguard_manager, firewall_manager, + email_manager, calendar_manager, file_manager, auth_manager, 
+ cell_link_manager, _configured_domain, COREFILE_PATH) + peer = peer_registry.get_peer(peer_name) + if not peer: + return jsonify({"message": f"Peer {peer_name} not found or already removed"}) + peer_ip = peer.get('ip') + peer_pubkey = peer.get('public_key', '') + success = peer_registry.remove_peer(peer_name) + if success: + if peer_ip: + firewall_manager.clear_peer_rules(peer_ip) + firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(), + cell_links=cell_link_manager.list_connections()) + if peer_pubkey: + try: + wireguard_manager.remove_peer(peer_pubkey) + except Exception as wg_err: + logger.warning(f"Peer {peer_name}: WireGuard removal failed (non-fatal): {wg_err}") + for _cleanup in [ + lambda: email_manager.delete_email_user(peer_name, _configured_domain()), + lambda: calendar_manager.delete_calendar_user(peer_name), + lambda: file_manager.delete_user(peer_name), + lambda: auth_manager.delete_user(peer_name), + ]: + try: + _cleanup() + except Exception: + pass + return jsonify({"message": f"Peer {peer_name} removed successfully"}) + except Exception as e: + logger.error(f"Error removing peer: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers/register', methods=['POST']) +def register_peer(): + try: + from app import peer_registry + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + return jsonify(peer_registry.register_peer(data)) + except Exception as e: + logger.error(f"Error registering peer: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers//unregister', methods=['DELETE']) +def unregister_peer(peer_name): + try: + from app import peer_registry + return jsonify(peer_registry.unregister_peer(peer_name)) + except Exception as e: + logger.error(f"Error unregistering peer: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/peers//update-ip', methods=['PUT']) +def 
update_peer_ip_registry(peer_name): + try: + from app import peer_registry, routing_manager + data = request.get_json(silent=True) + new_ip = data.get('ip') if data else None + if not new_ip: + return jsonify({"error": "Missing ip"}), 400 + success = peer_registry.update_peer_ip(peer_name, new_ip) + if success: + try: + routing_manager.update_peer_ip(peer_name, new_ip) + except Exception as e: + logger.warning(f"RoutingManager update_peer_ip failed: {e}") + return jsonify({"message": f"IP update received for {peer_name}"}) + return jsonify({"error": f"Peer {peer_name} not found"}), 404 + except Exception as e: + logger.error(f"Error updating peer IP: {e}") + return jsonify({"error": str(e)}), 500 + + +@bp.route('/api/ip-update', methods=['POST']) +def ip_update(): + try: + from app import peer_registry, routing_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + peer_name = data.get('peer') + new_ip = data.get('ip') + if not peer_name or not new_ip: + return jsonify({"error": "Missing peer or ip"}), 400 + success = peer_registry.update_peer_ip(peer_name, new_ip) + if success: + try: + routing_manager.update_peer_ip(peer_name, new_ip) + except Exception as e: + logger.warning(f"RoutingManager update_peer_ip failed: {e}") + return jsonify({"message": f"IP update received for {peer_name}"}) + return jsonify({"error": f"Peer {peer_name} not found"}), 404 + except Exception as e: + logger.error(f"Error handling IP update: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/routing.py b/api/routes/routing.py new file mode 100644 index 0000000..50fe979 --- /dev/null +++ b/api/routes/routing.py @@ -0,0 +1,207 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('routing', __name__) + +@bp.route('/api/routing/status', methods=['GET']) +def get_routing_status(): + try: + from app import routing_manager + return 
jsonify(routing_manager.get_status()) + except Exception as e: + logger.error(f"Error getting routing status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/setup', methods=['POST']) +def setup_routing(): + try: + from app import routing_manager + status = routing_manager.get_status() + return jsonify({'success': True, 'message': 'Routing managed by WireGuard PostUp rules', **status}) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/nat', methods=['GET']) +def get_nat_rules(): + try: + from app import routing_manager + return jsonify({"nat_rules": routing_manager.get_nat_rules()}) + except Exception as e: + logger.error(f"Error getting NAT rules: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/nat', methods=['POST']) +def add_nat_rule(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + result = routing_manager.add_nat_rule( + source_network=data.get('source_network'), + target_interface=data.get('target_interface'), + masquerade=data.get('masquerade', True), + nat_type=data.get('nat_type', 'MASQUERADE'), + protocol=data.get('protocol', 'ALL'), + external_port=data.get('external_port'), + internal_ip=data.get('internal_ip'), + internal_port=data.get('internal_port') + ) + return jsonify({'success': result}) + except Exception as e: + logger.error(f"Error adding NAT rule: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/nat/', methods=['DELETE']) +def remove_nat_rule(rule_id): + try: + from app import routing_manager + return jsonify(routing_manager.remove_nat_rule(rule_id)) + except Exception as e: + logger.error(f"Error removing NAT rule: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/peers', methods=['GET']) +def get_peer_routes(): + try: + from app import routing_manager + return jsonify({"peer_routes": routing_manager.get_peer_routes()}) + except Exception as e: + 
logger.error(f"Error getting peer routes: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/peers', methods=['POST']) +def add_peer_route(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + peer_name = data.get('peer_name') + peer_ip = data.get('peer_ip') + if not peer_name or not peer_ip: + return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400 + result = routing_manager.add_peer_route( + peer_name, peer_ip, + data.get('allowed_networks', []), + data.get('route_type', 'lan') + ) + return jsonify({"added": result}) + except Exception as e: + logger.error(f"Error adding peer route: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/peers/', methods=['DELETE']) +def remove_peer_route(peer_name): + try: + from app import routing_manager + return jsonify(routing_manager.remove_peer_route(peer_name)) + except Exception as e: + logger.error(f"Error removing peer route: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/exit-nodes', methods=['POST']) +def add_exit_node(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + peer_name = data.get('peer_name') + peer_ip = data.get('peer_ip') + if not peer_name or not peer_ip: + return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400 + return jsonify({"added": routing_manager.add_exit_node(peer_name, peer_ip, data.get('allowed_domains'))}) + except Exception as e: + logger.error(f"Error adding exit node: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/bridge', methods=['POST']) +def add_bridge_route(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + source_peer = data.get('source_peer') + target_peer = data.get('target_peer') + if not source_peer or not target_peer: + return jsonify({"error": "Missing required fields: source_peer, target_peer"}), 400 + return 
jsonify({"added": routing_manager.add_bridge_route(source_peer, target_peer, data.get('allowed_networks', []))}) + except Exception as e: + logger.error(f"Error adding bridge route: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/split', methods=['POST']) +def add_split_route(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + network = data.get('network') + exit_peer = data.get('exit_peer') + if not network or not exit_peer: + return jsonify({"error": "Missing required fields: network, exit_peer"}), 400 + return jsonify({"added": routing_manager.add_split_route(network, exit_peer, data.get('fallback_peer'))}) + except Exception as e: + logger.error(f"Error adding split route: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/firewall', methods=['GET']) +def get_firewall_rules(): + try: + from app import routing_manager + return jsonify({"firewall_rules": routing_manager.get_firewall_rules()}) + except Exception as e: + logger.error(f"Error getting firewall rules: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/firewall', methods=['POST']) +def add_firewall_rule(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + result = routing_manager.add_firewall_rule( + rule_type=data.get('rule_type'), + source=data.get('source'), + destination=data.get('destination'), + action=data.get('action', 'ACCEPT'), + port=data.get('port'), + protocol=data.get('protocol', 'ALL'), + port_range=data.get('port_range') + ) + return jsonify({'success': result}) + except Exception as e: + logger.error(f"Error adding firewall rule: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/firewall/<rule_id>', methods=['DELETE']) +def remove_firewall_rule(rule_id): + try: + from app import routing_manager + result = routing_manager.remove_firewall_rule(rule_id) + return jsonify({'success': result}), (200 if result else 404) + except 
Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/routing/live-iptables', methods=['GET']) +def get_live_iptables(): + try: + from app import routing_manager + return jsonify(routing_manager.get_live_iptables()) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/routing/connectivity', methods=['POST']) +def test_routing_connectivity(): + try: + from app import routing_manager + data = request.get_json(silent=True) or {} + return jsonify(routing_manager.test_routing_connectivity( + data.get('target_ip', '8.8.8.8'), + data.get('via_peer') + )) + except Exception as e: + logger.error(f"Error testing routing connectivity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/routing/logs', methods=['GET']) +def get_routing_logs(): + try: + from app import routing_manager + lines = request.args.get('lines', 50, type=int) + return jsonify(routing_manager.get_logs(lines)) + except Exception as e: + logger.error(f"Error getting routing logs: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/services.py b/api/routes/services.py new file mode 100644 index 0000000..13791e6 --- /dev/null +++ b/api/routes/services.py @@ -0,0 +1,291 @@ +import logging +import json +import os +from datetime import datetime +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('services', __name__) + +@bp.route('/api/services/bus/status', methods=['GET']) +def get_service_bus_status(): + try: + from app import service_bus + return jsonify(service_bus.get_service_status_summary()) + except Exception as e: + logger.error(f"Error getting service bus status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/services/bus/events', methods=['GET']) +def get_service_bus_events(): + try: + from app import service_bus + from service_bus import EventType + event_type = request.args.get('type') + source = request.args.get('source') + limit = 
int(request.args.get('limit', 100)) + events = service_bus.get_event_history( + EventType(event_type) if event_type else None, + source, + limit + ) + return jsonify([{ + 'event_id': e.event_id, + 'event_type': e.event_type.value, + 'source': e.source, + 'data': e.data, + 'timestamp': e.timestamp.isoformat() + } for e in events]) + except Exception as e: + logger.error(f"Error getting service bus events: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/services/bus/services/<service_name>/start', methods=['POST']) +def start_service(service_name): + try: + from app import service_bus + success = service_bus.orchestrate_service_start(service_name) + if success: + return jsonify({"message": f"Service {service_name} started successfully"}) + return jsonify({"error": f"Failed to start service {service_name}"}), 500 + except Exception as e: + logger.error(f"Error starting service {service_name}: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/services/bus/services/<service_name>/stop', methods=['POST']) +def stop_service(service_name): + try: + from app import service_bus + success = service_bus.orchestrate_service_stop(service_name) + if success: + return jsonify({"message": f"Service {service_name} stopped successfully"}) + return jsonify({"error": f"Failed to stop service {service_name}"}), 500 + except Exception as e: + logger.error(f"Error stopping service {service_name}: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/services/bus/services/<service_name>/restart', methods=['POST']) +def restart_service(service_name): + try: + from app import service_bus + success = service_bus.orchestrate_service_restart(service_name) + if success: + return jsonify({"message": f"Service {service_name} restarted successfully"}) + return jsonify({"error": f"Failed to restart service {service_name}"}), 500 + except Exception as e: + logger.error(f"Error restarting service {service_name}: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/services/<service>', 
methods=['GET']) +def get_service_logs(service): + try: + from app import log_manager + level = request.args.get('level', 'INFO') + lines = int(request.args.get('lines', 50)) + logs = log_manager.get_service_logs(service, level, lines) + return jsonify({"service": service, "logs": logs}) + except Exception as e: + logger.error(f"Error getting logs for {service}: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/search', methods=['POST']) +def search_logs(): + try: + from app import log_manager + data = request.get_json(silent=True) or {} + results = log_manager.search_logs( + data.get('query', ''), + data.get('time_range'), + data.get('services'), + data.get('level') + ) + return jsonify({"results": results, "count": len(results)}) + except Exception as e: + logger.error(f"Error searching logs: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/export', methods=['POST']) +def export_logs(): + try: + from app import log_manager + data = request.get_json(silent=True) or {} + format = data.get('format', 'json') + log_data = log_manager.export_logs(format, data.get('filters', {})) + return jsonify({"logs": log_data, "format": format}) + except Exception as e: + logger.error(f"Error exporting logs: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/statistics', methods=['GET']) +def get_log_statistics(): + try: + from app import log_manager + return jsonify(log_manager.get_log_statistics(request.args.get('service'))) + except Exception as e: + logger.error(f"Error getting log statistics: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/rotate', methods=['POST']) +def rotate_logs(): + try: + from app import log_manager + data = request.get_json(silent=True) or {} + log_manager.rotate_logs(data.get('service')) + return jsonify({"message": "Logs rotated successfully"}) + except Exception as e: + logger.error(f"Error rotating logs: {e}") + return jsonify({"error": str(e)}), 500 + 
+@bp.route('/api/logs/files', methods=['GET']) +def get_log_file_infos(): + try: + from app import log_manager + return jsonify(log_manager.get_all_log_file_infos()) + except Exception as e: + logger.error(f"Error listing log files: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/verbosity', methods=['GET']) +def get_log_verbosity(): + try: + from app import log_manager + return jsonify(log_manager.get_service_levels()) + except Exception as e: + logger.error(f"Error getting log verbosity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs/verbosity', methods=['PUT']) +def set_log_verbosity(): + try: + from app import log_manager + data = request.get_json(silent=True) or {} + for service, level in data.items(): + log_manager.set_service_level(service, level) + levels_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'log_levels.json') + os.makedirs(os.path.dirname(levels_file), exist_ok=True) + current = {} + if os.path.exists(levels_file): + try: + with open(levels_file) as f: + current = json.load(f) + except Exception: + pass + current.update(data) + with open(levels_file, 'w') as f: + json.dump(current, f, indent=2) + return jsonify({"message": "Log levels updated", "levels": log_manager.get_service_levels()}) + except Exception as e: + logger.error(f"Error setting log verbosity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/services/status', methods=['GET']) +def get_all_services_status(): + try: + from app import service_bus + services_status = {} + for service_name in service_bus.list_services(): + try: + service = service_bus.get_service(service_name) + status = service.get_status() + if isinstance(status, dict): + clean_status = { + 'status': status.get('status', 'unknown'), + 'running': status.get('running', False), + 'timestamp': status.get('timestamp', datetime.utcnow().isoformat()) + } + if service_name == 'network': + clean_status.update({ + 'dns_status': 
status.get('dns_running', False), + 'dhcp_status': status.get('dhcp_running', False), + 'ntp_status': status.get('ntp_running', False) + }) + elif service_name == 'wireguard': + clean_status.update({ + 'peers_count': status.get('peers_count', 0), + 'interface': status.get('interface', 'unknown') + }) + elif service_name == 'email': + clean_status.update({ + 'users_count': status.get('users_count', 0), + 'domain': status.get('domain', 'unknown') + }) + elif service_name == 'calendar': + clean_status.update({ + 'users_count': status.get('users_count', 0), + 'calendars_count': status.get('calendars_count', 0) + }) + elif service_name == 'files': + clean_status.update({ + 'users_count': status.get('users_count', 0), + 'storage_used': status.get('total_storage_used', {}) + }) + elif service_name == 'routing': + clean_status.update({ + 'nat_rules_count': status.get('nat_rules_count', 0), + 'peer_routes_count': status.get('peer_routes_count', 0), + 'firewall_rules_count': status.get('firewall_rules_count', 0) + }) + elif service_name == 'vault': + clean_status.update({ + 'certificates_count': status.get('certificates_count', 0), + 'trusted_keys_count': status.get('trusted_keys_count', 0) + }) + services_status[service_name] = clean_status + else: + services_status[service_name] = {'status': str(status), 'running': bool(status)} + except Exception as e: + services_status[service_name] = {'error': str(e), 'status': 'offline', 'running': False} + return jsonify({ + "network": services_status.get('network', {}), + "wireguard": services_status.get('wireguard', {}), + "email": services_status.get('email', {}), + "calendar": services_status.get('calendar', {}), + "files": services_status.get('files', {}), + "routing": services_status.get('routing', {}), + "vault": services_status.get('vault', {}), + "timestamp": datetime.utcnow().isoformat() + }) + except Exception as e: + logger.error(f"Error getting all services status: {e}") + return jsonify({"error": str(e)}), 500 + 
+@bp.route('/api/services/connectivity', methods=['GET']) +def test_all_services_connectivity(): + try: + from app import service_bus + connectivity_results = {} + for service_name in service_bus.list_services(): + try: + service = service_bus.get_service(service_name) + if hasattr(service, 'test_connectivity'): + connectivity_results[service_name] = service.test_connectivity() + else: + connectivity_results[service_name] = {'status': 'ok', 'message': 'No connectivity test available'} + except Exception as e: + connectivity_results[service_name] = {'status': 'error', 'message': str(e)} + return jsonify({ + "network": connectivity_results.get('network', {}), + "wireguard": connectivity_results.get('wireguard', {}), + "email": connectivity_results.get('email', {}), + "calendar": connectivity_results.get('calendar', {}), + "files": connectivity_results.get('files', {}), + "routing": connectivity_results.get('routing', {}), + "timestamp": datetime.utcnow().isoformat() + }) + except Exception as e: + logger.error(f"Error testing all services connectivity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/logs', methods=['GET']) +def get_backend_logs(): + log_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'picell.log') + lines = int(request.args.get('lines', 100)) + try: + if not os.path.exists(log_file): + return jsonify({"error": "Log file not found."}), 404 + with open(log_file, 'r', encoding='utf-8', errors='ignore') as f: + all_lines = f.readlines() + tail_lines = all_lines[-lines:] if lines > 0 else all_lines + return jsonify({"log": ''.join(tail_lines)}) + except Exception as e: + logger.error(f"Error reading log file: {e}") + return jsonify({"error": str(e)}), 500 diff --git a/api/routes/vault.py b/api/routes/vault.py new file mode 100644 index 0000000..ab5b393 --- /dev/null +++ b/api/routes/vault.py @@ -0,0 +1,165 @@ +import logging +import os +from flask import Blueprint, request, jsonify, current_app +logger = 
logging.getLogger('picell') +bp = Blueprint('vault', __name__) + +@bp.route('/api/vault/status', methods=['GET']) +def get_vault_status(): + try: + return jsonify(current_app.vault_manager.get_status()) + except Exception as e: + logger.error(f"Error getting vault status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/certificates', methods=['GET']) +def get_certificates(): + try: + return jsonify(current_app.vault_manager.list_certificates()) + except Exception as e: + logger.error(f"Error getting certificates: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/certificates', methods=['POST']) +def generate_certificate(): + try: + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + result = current_app.vault_manager.generate_certificate( + common_name=data['common_name'], + domains=data.get('domains', []), + key_size=data.get('key_size', 2048), + days=data.get('days', 365) + ) + return jsonify(result) + except Exception as e: + logger.error(f"Error generating certificate: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/certificates/<common_name>', methods=['DELETE']) +def revoke_certificate(common_name): + try: + return jsonify({"revoked": current_app.vault_manager.revoke_certificate(common_name)}) + except Exception as e: + logger.error(f"Error revoking certificate: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/ca/certificate', methods=['GET']) +def get_ca_certificate(): + try: + return jsonify({"certificate": current_app.vault_manager.get_ca_certificate()}) + except Exception as e: + logger.error(f"Error getting CA certificate: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/age/public-key', methods=['GET']) +def get_age_public_key(): + try: + return jsonify({"public_key": current_app.vault_manager.get_age_public_key()}) + except Exception as e: + logger.error(f"Error getting Age public key: {e}") + 
return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/trust/keys', methods=['GET']) +def get_trusted_keys(): + try: + return jsonify(current_app.vault_manager.get_trusted_keys()) + except Exception as e: + logger.error(f"Error getting trusted keys: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/trust/keys', methods=['POST']) +def add_trusted_key(): + try: + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + result = current_app.vault_manager.add_trusted_key( + name=data['name'], + public_key=data['public_key'], + trust_level=data.get('trust_level', 'direct') + ) + return jsonify({"added": result}) + except Exception as e: + logger.error(f"Error adding trusted key: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/trust/keys/<name>', methods=['DELETE']) +def remove_trusted_key(name): + try: + return jsonify({"removed": current_app.vault_manager.remove_trusted_key(name)}) + except Exception as e: + logger.error(f"Error removing trusted key: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/trust/verify', methods=['POST']) +def verify_trust_chain(): + try: + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + result = current_app.vault_manager.verify_trust_chain( + peer_name=data['peer_name'], + signature=data['signature'], + data=data['data'] + ) + return jsonify({"verified": result}) + except Exception as e: + logger.error(f"Error verifying trust chain: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/trust/chains', methods=['GET']) +def get_trust_chains(): + try: + return jsonify(current_app.vault_manager.get_trust_chains()) + except Exception as e: + logger.error(f"Error getting trust chains: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/vault/secrets', methods=['GET']) +def list_secrets(): + try: + from app import is_local_request + 
if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + return jsonify({'secrets': current_app.vault_manager.list_secrets()}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/vault/secrets', methods=['POST']) +def store_secret(): + try: + from app import is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + data = request.get_json(silent=True) + if not data or 'name' not in data or 'value' not in data: + return jsonify({'error': 'Missing name or value'}), 400 + current_app.vault_manager.store_secret(data['name'], data['value']) + return jsonify({'stored': True}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/vault/secrets/<name>', methods=['GET']) +def get_secret(name): + try: + from app import is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + value = current_app.vault_manager.get_secret(name) + if value is None: + return jsonify({'error': 'Not found'}), 404 + return jsonify({'name': name, 'value': value}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/vault/secrets/<name>', methods=['DELETE']) +def delete_secret(name): + try: + from app import is_local_request + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 + return jsonify({'deleted': current_app.vault_manager.delete_secret(name)}) + except Exception as e: + return jsonify({'error': str(e)}), 500 diff --git a/api/routes/wireguard.py b/api/routes/wireguard.py new file mode 100644 index 0000000..9f4639f --- /dev/null +++ b/api/routes/wireguard.py @@ -0,0 +1,236 @@ +import logging +from flask import Blueprint, request, jsonify +logger = logging.getLogger('picell') +bp = Blueprint('wireguard', __name__) + +@bp.route('/api/wireguard/keys', methods=['GET']) +def get_wireguard_keys(): + try: + from app import wireguard_manager + keys = wireguard_manager.get_keys() + return 
jsonify({ + 'public_key': keys.get('public_key', ''), + 'has_private_key': bool(keys.get('private_key')), + }) + except Exception as e: + logger.error(f"Error getting WireGuard keys: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/keys/peer', methods=['POST']) +def generate_peer_keys(): + try: + from app import wireguard_manager + data = request.get_json(silent=True) or {} + name = data.get('name') or data.get('peer_name') + if not name: + return jsonify({"error": "Missing peer name"}), 400 + return jsonify(wireguard_manager.generate_peer_keys(name)) + except Exception as e: + logger.error(f"Error generating peer keys: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/config', methods=['GET']) +def get_wireguard_config(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_config()) + except Exception as e: + logger.error(f"Error getting WireGuard config: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers', methods=['GET']) +def get_wireguard_peers(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_peers()) + except Exception as e: + logger.error(f"Error getting WireGuard peers: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers', methods=['POST']) +def add_wireguard_peer(): + try: + from app import wireguard_manager + data = request.get_json(silent=True) or {} + result = wireguard_manager.add_peer( + name=data.get('name', ''), + public_key=data.get('public_key', ''), + endpoint_ip=data.get('endpoint', data.get('endpoint_ip', '')), + allowed_ips=data.get('allowed_ips', ''), + persistent_keepalive=data.get('persistent_keepalive', 25) + ) + return jsonify({"success": result}) + except Exception as e: + logger.error(f"Error adding WireGuard peer: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers', methods=['DELETE']) +def remove_wireguard_peer(): + try: + from 
app import wireguard_manager + data = request.get_json(silent=True) or {} + public_key = data.get('public_key') or data.get('name', '') + return jsonify({"success": wireguard_manager.remove_peer(public_key)}) + except Exception as e: + logger.error(f"Error removing WireGuard peer: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/status', methods=['GET']) +def get_wireguard_status(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_status()) + except Exception as e: + logger.error(f"Error getting WireGuard status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/connectivity', methods=['POST']) +def test_wireguard_connectivity(): + try: + from app import wireguard_manager + data = request.get_json(silent=True) + if data is None: + return jsonify({"error": "No data provided"}), 400 + return jsonify(wireguard_manager.test_connectivity(data)) + except Exception as e: + logger.error(f"Error testing WireGuard connectivity: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers/ip', methods=['PUT']) +def update_peer_ip(): + try: + from app import wireguard_manager + data = request.get_json(silent=True) or {} + result = wireguard_manager.update_peer_ip( + data.get('public_key', data.get('peer', '')), + data.get('ip', '') + ) + return jsonify({"success": result}) + except Exception as e: + logger.error(f"Error updating peer IP: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers/status', methods=['POST']) +def get_peer_status(): + try: + from app import wireguard_manager + data = request.get_json(silent=True) or {} + public_key = data.get('public_key', '') + if not public_key: + return jsonify({"error": "Missing public_key"}), 400 + return jsonify(wireguard_manager.get_peer_status(public_key)) + except Exception as e: + logger.error(f"Error getting peer status: {e}") + return jsonify({"error": str(e)}), 500 + 
+@bp.route('/api/wireguard/peers/statuses', methods=['GET']) +def get_all_peer_statuses(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_all_peer_statuses()) + except Exception as e: + logger.error(f"Error getting peer statuses: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/network/setup', methods=['POST']) +def setup_network(): + try: + from app import wireguard_manager + success = wireguard_manager.setup_network_configuration() + if success: + return jsonify({"message": "Network configuration setup completed successfully"}) + return jsonify({"error": "Failed to setup network configuration"}), 500 + except Exception as e: + logger.error(f"Error setting up network configuration: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/network/status', methods=['GET']) +def get_network_status(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_network_status()) + except Exception as e: + logger.error(f"Error getting network status: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/peers/config', methods=['POST']) +def get_peer_config(): + try: + from app import wireguard_manager, peer_registry + data = request.get_json(silent=True) or {} + peer_name = data.get('name', data.get('peer', '')) + + peer_ip = data.get('ip', '') + peer_private_key = data.get('private_key', '') + registered = peer_registry.get_peer(peer_name) if peer_name else {} + if peer_name and (not peer_ip or not peer_private_key): + if registered: + peer_ip = peer_ip or registered.get('ip', '') + peer_private_key = peer_private_key or registered.get('private_key', '') + + server_endpoint = data.get('server_endpoint', '') + if not server_endpoint: + srv = wireguard_manager.get_server_config() + server_endpoint = srv.get('endpoint') or '' + + allowed_ips = data.get('allowed_ips') or None + if not allowed_ips and registered: + internet_access = 
registered.get('internet_access', True) + allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips() + + result = wireguard_manager.get_peer_config( + peer_name=peer_name, + peer_ip=peer_ip, + peer_private_key=peer_private_key, + server_endpoint=server_endpoint, + allowed_ips=allowed_ips, + ) + return jsonify({"config": result}) + except Exception as e: + logger.error(f"Error getting peer config: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/server-config', methods=['GET']) +def get_server_config(): + try: + from app import wireguard_manager + return jsonify(wireguard_manager.get_server_config()) + except Exception as e: + logger.error(f"Error getting server config: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/refresh-ip', methods=['GET', 'POST']) +def refresh_external_ip(): + try: + from app import wireguard_manager + ip = wireguard_manager.get_external_ip(force_refresh=True) + port = wireguard_manager._get_configured_port() + return jsonify({ + 'external_ip': ip, + 'port': port, + 'endpoint': f'{ip}:{port}' if ip else None, + }) + except Exception as e: + logger.error(f"Error refreshing external IP: {e}") + return jsonify({"error": str(e)}), 500 + +@bp.route('/api/wireguard/apply-enforcement', methods=['POST']) +def apply_wireguard_enforcement(): + try: + from app import peer_registry, firewall_manager, cell_link_manager, _configured_domain, COREFILE_PATH + peers = peer_registry.list_peers() + firewall_manager.apply_all_peer_rules(peers) + firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain(), + cell_links=cell_link_manager.list_connections()) + return jsonify({'ok': True, 'peers': len(peers)}) + except Exception as e: + return jsonify({'error': str(e)}), 500 + +@bp.route('/api/wireguard/check-port', methods=['GET', 'POST']) +def check_wireguard_port(): + try: + from app import wireguard_manager + port_open = 
wireguard_manager.check_port_open() + return jsonify({'port_open': port_open, 'port': wireguard_manager._get_configured_port()}) + except Exception as e: + return jsonify({"error": str(e)}), 500 diff --git a/api/routing_manager.py b/api/routing_manager.py index fde7e44..41ffc4f 100644 --- a/api/routing_manager.py +++ b/api/routing_manager.py @@ -1074,33 +1074,17 @@ class RoutingManager(BaseServiceManager): return False def stop(self) -> bool: - """Stop routing service""" + """Stop routing service (state only — iptables rules are NOT flushed). + + Flushing iptables here would destroy WireGuard MASQUERADE and all peer + FORWARD rules applied by firewall_manager. Individual rule removal is + handled by remove_nat_rule() / remove_firewall_rule(). + """ try: - # Set internal state to stopped self._service_running = False self._save_service_state() - - # Try to clear all iptables rules (may fail in Docker without privileges) - try: - subprocess.run(['iptables', '-t', 'nat', '-F'], - check=True, timeout=10) - subprocess.run(['iptables', '-F'], - check=True, timeout=10) - except (subprocess.CalledProcessError, FileNotFoundError) as e: - logger.warning(f"Could not clear iptables rules: {e}") - # Continue anyway - service is considered stopped - - # Try to disable IP forwarding (may fail in Docker without privileges) - try: - subprocess.run(['sysctl', '-w', 'net.ipv4.ip_forward=0'], - check=True, timeout=10) - except (subprocess.CalledProcessError, FileNotFoundError) as e: - logger.warning(f"Could not disable IP forwarding: {e}") - # Continue anyway - service is considered stopped - - logger.info("Routing service stopped successfully") + logger.info("Routing service stopped (state only; iptables untouched)") return True - except Exception as e: logger.error(f"Failed to stop routing service: {e}") # Even if system commands fail, we consider the service stopped diff --git a/api/wireguard_manager.py b/api/wireguard_manager.py index 473c6aa..a5884b0 100644 --- 
a/api/wireguard_manager.py +++ b/api/wireguard_manager.py @@ -365,6 +365,8 @@ class WireGuardManager(BaseServiceManager): current_peer['ips'] = line.split('=', 1)[1].strip() elif line.startswith('PersistentKeepalive'): current_peer['ka'] = line.split('=', 1)[1].strip() + elif line.startswith('Endpoint'): + current_peer['endpoint'] = line.split('=', 1)[1].strip() elif line == '' and 'pub' in current_peer: desired[current_peer['pub']] = current_peer current_peer = None @@ -397,6 +399,8 @@ class WireGuardManager(BaseServiceManager): 'peer', pub, 'allowed-ips', p.get('ips', ''), 'persistent-keepalive', p.get('ka', '25')] + if p.get('endpoint'): + args += ['endpoint', p['endpoint']] subprocess.run(args, capture_output=True, timeout=5) logger.info(f'wg set applied: {len(desired)} peers') @@ -483,7 +487,7 @@ class WireGuardManager(BaseServiceManager): logger.error(f'add_cell_peer: invalid endpoint port: {endpoint!r}') return False try: - ipaddress.ip_network(vpn_subnet, strict=False) + remote_net = ipaddress.ip_network(vpn_subnet, strict=False) except ValueError as e: logger.error(f'add_cell_peer: invalid vpn_subnet {vpn_subnet!r}: {e}') return False @@ -491,6 +495,17 @@ class WireGuardManager(BaseServiceManager): if any(c.isspace() for c in vpn_subnet): logger.error(f'add_cell_peer: vpn_subnet contains whitespace: {vpn_subnet!r}') return False + # Reject subnets that overlap the local WG network — would create a routing blackhole + try: + local_net = ipaddress.ip_network(self._get_configured_network(), strict=False) + if local_net.overlaps(remote_net): + logger.error( + f'add_cell_peer: vpn_subnet {vpn_subnet!r} overlaps local WG network ' + f'{str(local_net)!r} — use a distinct subnet on the remote cell' + ) + return False + except Exception: + pass try: content = self._read_config() peer_block = ( diff --git a/tests/test_cell_link_manager.py b/tests/test_cell_link_manager.py index 056f5b4..0ab5484 100644 --- a/tests/test_cell_link_manager.py +++ 
b/tests/test_cell_link_manager.py @@ -160,3 +160,241 @@ class TestCellLinkManagerConnections(unittest.TestCase): if __name__ == '__main__': unittest.main() + + +# --------------------------------------------------------------------------- +# TestAddConnectionAtomicity +# --------------------------------------------------------------------------- + +class TestAddConnectionAtomicity(unittest.TestCase): + """Verify that add_connection rolls back correctly when WG or DNS steps fail.""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.wg = _make_wg_mock() + self.nm = _make_nm_mock() + self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def test_wg_fail_does_not_call_dns(self): + """When add_cell_peer returns False, add_cell_dns_forward must NOT be called.""" + self.wg.add_cell_peer.return_value = False + with self.assertRaises(RuntimeError): + self.mgr.add_connection(SAMPLE_INVITE) + self.nm.add_cell_dns_forward.assert_not_called() + + def test_wg_fail_does_not_persist_link(self): + """When WG fails, list_connections() must still return [] (nothing persisted).""" + self.wg.add_cell_peer.return_value = False + with self.assertRaises(RuntimeError): + self.mgr.add_connection(SAMPLE_INVITE) + self.assertEqual(self.mgr.list_connections(), []) + + def test_wg_fail_raises_runtime_error(self): + """add_connection raises RuntimeError (not some other exception) when WG fails.""" + self.wg.add_cell_peer.return_value = False + with self.assertRaises(RuntimeError): + self.mgr.add_connection(SAMPLE_INVITE) + + def test_dns_warning_still_persists_link(self): + """When DNS returns warnings (not a hard failure), the link IS still saved.""" + self.nm.add_cell_dns_forward.return_value = { + 'restarted': [], + 'warnings': ['CoreDNS reload timed out'], + } + self.mgr.add_connection(SAMPLE_INVITE) + links = self.mgr.list_connections() + self.assertEqual(len(links), 1) + 
self.assertEqual(links[0]['cell_name'], 'office') + + def test_dns_warning_does_not_raise(self): + """When DNS returns warnings, add_connection completes without raising.""" + self.nm.add_cell_dns_forward.return_value = { + 'restarted': [], + 'warnings': ['CoreDNS reload timed out'], + } + try: + self.mgr.add_connection(SAMPLE_INVITE) + except Exception as e: + self.fail(f"add_connection raised unexpectedly with DNS warnings: {e}") + + +# --------------------------------------------------------------------------- +# TestAddConnectionPermissions +# --------------------------------------------------------------------------- + +class TestAddConnectionPermissions(unittest.TestCase): + """Verify that inbound_services controls the permissions field on the saved link.""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.wg = _make_wg_mock() + self.nm = _make_nm_mock() + self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def _get_link(self): + links = self.mgr.list_connections() + self.assertEqual(len(links), 1) + return links[0] + + def test_add_with_no_inbound_defaults_all_deny(self): + """No inbound_services arg → all inbound permissions False.""" + self.mgr.add_connection(SAMPLE_INVITE) + link = self._get_link() + inbound = link['permissions']['inbound'] + for service, allowed in inbound.items(): + self.assertFalse(allowed, f"Expected {service} to be False, got {allowed}") + + def test_add_with_inbound_services_sets_them(self): + """inbound_services=['calendar','files'] → those two True, others False.""" + self.mgr.add_connection(SAMPLE_INVITE, inbound_services=['calendar', 'files']) + link = self._get_link() + inbound = link['permissions']['inbound'] + self.assertTrue(inbound['calendar']) + self.assertTrue(inbound['files']) + self.assertFalse(inbound['mail']) + self.assertFalse(inbound['webdav']) + + def test_inbound_invalid_service_ignored(self): + """Passing 
'badservice' in inbound_services does not appear in permissions.""" + self.mgr.add_connection(SAMPLE_INVITE, inbound_services=['badservice', 'calendar']) + link = self._get_link() + inbound = link['permissions']['inbound'] + self.assertNotIn('badservice', inbound) + # valid one was still applied + self.assertTrue(inbound['calendar']) + + +# --------------------------------------------------------------------------- +# TestUpdatePermissions +# --------------------------------------------------------------------------- + +class TestUpdatePermissions(unittest.TestCase): + """Tests for the new update_permissions / get_permissions methods.""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.wg = _make_wg_mock() + self.nm = _make_nm_mock() + self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + # Add a connection so there is something to update + self.mgr.add_connection(SAMPLE_INVITE) + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def test_update_sets_inbound_values(self): + """update_permissions with inbound={'calendar': True} persists correctly.""" + with patch('cell_link_manager.firewall_manager', create=True) as mock_fm: + mock_fm.apply_cell_rules = MagicMock() + self.mgr.update_permissions('office', {'calendar': True}, {}) + # Re-read from disk to confirm persistence + mgr2 = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + perms = mgr2.get_permissions('office') + self.assertTrue(perms['inbound']['calendar']) + self.assertFalse(perms['inbound']['files']) + + def test_update_rejects_unknown_service_by_cleaning_it_out(self): + """update_permissions with inbound={'bad': True} — 'bad' must not appear in saved perms.""" + with patch('cell_link_manager.firewall_manager', create=True) as mock_fm: + mock_fm.apply_cell_rules = MagicMock() + self.mgr.update_permissions('office', {'bad': True, 'calendar': True}, {}) + perms = self.mgr.get_permissions('office') + self.assertNotIn('bad', perms['inbound']) + 
self.assertTrue(perms['inbound']['calendar']) + + def test_update_nonexistent_cell_raises(self): + """update_permissions on an unknown cell_name raises ValueError.""" + with self.assertRaises(ValueError): + self.mgr.update_permissions('nosuchcell', {}, {}) + + def test_get_permissions_returns_correct(self): + """get_permissions returns the dict that was saved by update_permissions.""" + with patch('cell_link_manager.firewall_manager', create=True) as mock_fm: + mock_fm.apply_cell_rules = MagicMock() + self.mgr.update_permissions( + 'office', + inbound={'calendar': True, 'files': False}, + outbound={'mail': True}, + ) + perms = self.mgr.get_permissions('office') + self.assertIn('inbound', perms) + self.assertIn('outbound', perms) + self.assertTrue(perms['inbound']['calendar']) + self.assertFalse(perms['inbound']['files']) + self.assertTrue(perms['outbound']['mail']) + + def test_get_permissions_nonexistent_cell_raises(self): + """get_permissions on an unknown cell_name raises ValueError.""" + with self.assertRaises(ValueError): + self.mgr.get_permissions('nosuchcell') + + +# --------------------------------------------------------------------------- +# TestLoadMigration +# --------------------------------------------------------------------------- + +class TestLoadMigration(unittest.TestCase): + """Verify _load() lazily injects permissions field when it is missing.""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.wg = _make_wg_mock() + self.nm = _make_nm_mock() + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def test_load_injects_permissions_if_missing(self): + """Write cell_links.json without permissions; _load should add all-False defaults.""" + links_file = os.path.join(self.test_dir, 'cell_links.json') + legacy_links = [ + { + 'cell_name': 'legacy-office', + 'public_key': 'officepubkey=', + 'vpn_subnet': '10.1.0.0/24', + 'dns_ip': '10.1.0.1', + 'domain': 'legacy-office.cell', + # NO 'permissions' key — simulates pre-migration data + 
} + ] + with open(links_file, 'w') as f: + json.dump(legacy_links, f) + + mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + links = mgr.list_connections() + + self.assertEqual(len(links), 1) + link = links[0] + self.assertIn('permissions', link) + perms = link['permissions'] + self.assertIn('inbound', perms) + self.assertIn('outbound', perms) + for service in ('calendar', 'files', 'mail', 'webdav'): + self.assertFalse(perms['inbound'][service]) + self.assertFalse(perms['outbound'][service]) + + def test_load_migration_persists_to_disk(self): + """After migration, re-loading the same file returns the injected permissions.""" + links_file = os.path.join(self.test_dir, 'cell_links.json') + with open(links_file, 'w') as f: + json.dump([{ + 'cell_name': 'old-cell', + 'public_key': 'somepubkey=', + 'vpn_subnet': '10.2.0.0/24', + 'dns_ip': '10.2.0.1', + 'domain': 'old-cell.cell', + }], f) + + mgr1 = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm) + mgr1.list_connections() # triggers migration + save + + # Read the file directly and confirm permissions are now on disk + with open(links_file) as f: + raw = json.load(f) + self.assertIn('permissions', raw[0]) diff --git a/tests/test_cells_endpoints.py b/tests/test_cells_endpoints.py index 6a2c351..f3a579d 100644 --- a/tests/test_cells_endpoints.py +++ b/tests/test_cells_endpoints.py @@ -291,5 +291,230 @@ class TestGetCellConnectionStatus(unittest.TestCase): self.assertIn('error', json.loads(r.data)) +class TestAddCellRuntimeError(unittest.TestCase): + """POST /api/cells — RuntimeError from the manager must now return 400, not 500.""" + + def setUp(self): + app.config['TESTING'] = True + self.client = app.test_client() + + @patch('app.cell_link_manager') + def test_add_cell_runtime_error_returns_400(self, mock_clm): + """When add_connection raises RuntimeError (WG failure), endpoint returns 400.""" + mock_clm.add_connection.side_effect = RuntimeError('Failed to add WireGuard peer') + r = 
self.client.post( + '/api/cells', + data=json.dumps(_VALID_CELL_BODY), + content_type='application/json', + ) + self.assertEqual(r.status_code, 400) + data = json.loads(r.data) + self.assertIn('error', data) + + @patch('app.cell_link_manager') + def test_add_cell_runtime_error_body_contains_message(self, mock_clm): + """The 400 response for a RuntimeError includes the error message.""" + mock_clm.add_connection.side_effect = RuntimeError('WireGuard peer add failed') + r = self.client.post( + '/api/cells', + data=json.dumps(_VALID_CELL_BODY), + content_type='application/json', + ) + data = json.loads(r.data) + self.assertIn('WireGuard', data['error']) + + +class TestListServices(unittest.TestCase): + """GET /api/cells/services""" + + def setUp(self): + app.config['TESTING'] = True + self.client = app.test_client() + + def test_list_services_returns_200(self): + """GET /api/cells/services returns HTTP 200.""" + r = self.client.get('/api/cells/services') + self.assertEqual(r.status_code, 200) + + def test_list_services_returns_services_key(self): + """Response body has a 'services' key.""" + r = self.client.get('/api/cells/services') + data = json.loads(r.data) + self.assertIn('services', data) + + def test_list_services_returns_list(self): + """'services' value is a non-empty list.""" + r = self.client.get('/api/cells/services') + data = json.loads(r.data) + self.assertIsInstance(data['services'], list) + self.assertGreater(len(data['services']), 0) + + def test_list_services_includes_known_services(self): + """'services' includes the four known shareable services.""" + r = self.client.get('/api/cells/services') + services = json.loads(r.data)['services'] + for expected in ('calendar', 'files', 'mail', 'webdav'): + self.assertIn(expected, services) + + +class TestGetCellPermissions(unittest.TestCase): + """GET /api/cells//permissions""" + + def setUp(self): + app.config['TESTING'] = True + self.client = app.test_client() + + @patch('app.cell_link_manager') + def 
test_get_permissions_returns_200(self, mock_clm): + """GET /api/cells/office/permissions returns 200 when cell exists.""" + mock_clm.get_permissions.return_value = { + 'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False}, + 'outbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False}, + } + r = self.client.get('/api/cells/office/permissions') + self.assertEqual(r.status_code, 200) + + @patch('app.cell_link_manager') + def test_get_permissions_response_has_inbound_and_outbound(self, mock_clm): + """Response body contains 'inbound' and 'outbound' keys.""" + mock_clm.get_permissions.return_value = { + 'inbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False}, + 'outbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False}, + } + r = self.client.get('/api/cells/office/permissions') + data = json.loads(r.data) + self.assertIn('inbound', data) + self.assertIn('outbound', data) + + @patch('app.cell_link_manager') + def test_get_permissions_unknown_cell_returns_404(self, mock_clm): + """ValueError from get_permissions maps to 404.""" + mock_clm.get_permissions.side_effect = ValueError('cell not found') + r = self.client.get('/api/cells/nosuchcell/permissions') + self.assertEqual(r.status_code, 404) + self.assertIn('error', json.loads(r.data)) + + @patch('app.cell_link_manager') + def test_get_permissions_passes_cell_name(self, mock_clm): + """The cell_name URL segment is forwarded to get_permissions.""" + mock_clm.get_permissions.return_value = {'inbound': {}, 'outbound': {}} + self.client.get('/api/cells/faraway/permissions') + mock_clm.get_permissions.assert_called_once_with('faraway') + + +class TestUpdateCellPermissions(unittest.TestCase): + """PUT /api/cells//permissions""" + + def setUp(self): + app.config['TESTING'] = True + self.client = app.test_client() + + _VALID_PERM_BODY = { + 'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False}, + 'outbound': 
{'calendar': False, 'files': False, 'mail': False, 'webdav': False}, + } + + @patch('app.cell_link_manager') + @patch('app.peer_registry') + @patch('app.firewall_manager') + @patch('app.config_manager') + def test_update_permissions_returns_200(self, mock_cfg, mock_fm, mock_pr, mock_clm): + """PUT with valid inbound/outbound returns 200.""" + mock_cfg.configs = {'_identity': {'domain': 'cell'}} + mock_pr.list_peers.return_value = [] + mock_clm.list_connections.return_value = [] + mock_clm.update_permissions.return_value = { + 'cell_name': 'office', + 'permissions': self._VALID_PERM_BODY, + } + mock_fm.apply_all_dns_rules.return_value = True + r = self.client.put( + '/api/cells/office/permissions', + data=json.dumps(self._VALID_PERM_BODY), + content_type='application/json', + ) + self.assertEqual(r.status_code, 200) + data = json.loads(r.data) + self.assertIn('message', data) + self.assertIn('link', data) + + @patch('app.cell_link_manager') + def test_update_permissions_unknown_service_returns_400(self, mock_clm): + """PUT body containing an unknown service name returns 400.""" + body = { + 'inbound': {'bad_service': True, 'calendar': True}, + 'outbound': {}, + } + r = self.client.put( + '/api/cells/office/permissions', + data=json.dumps(body), + content_type='application/json', + ) + self.assertEqual(r.status_code, 400) + data = json.loads(r.data) + self.assertIn('error', data) + # update_permissions should NOT have been called when validation fails + mock_clm.update_permissions.assert_not_called() + + @patch('app.cell_link_manager') + def test_update_permissions_unknown_cell_returns_404(self, mock_clm): + """ValueError from update_permissions (cell not found) maps to 404.""" + mock_clm.update_permissions.side_effect = ValueError('cell not found') + r = self.client.put( + '/api/cells/nosuchcell/permissions', + data=json.dumps(self._VALID_PERM_BODY), + content_type='application/json', + ) + self.assertEqual(r.status_code, 404) + self.assertIn('error', 
json.loads(r.data)) + + @patch('app.cell_link_manager') + def test_update_permissions_no_body_returns_400(self, mock_clm): + """PUT with no JSON body returns 400.""" + r = self.client.put('/api/cells/office/permissions') + self.assertEqual(r.status_code, 400) + self.assertIn('error', json.loads(r.data)) + mock_clm.update_permissions.assert_not_called() + + @patch('app.cell_link_manager') + def test_update_permissions_outbound_unknown_service_returns_400(self, mock_clm): + """Unknown service in outbound (not just inbound) also returns 400.""" + body = { + 'inbound': {'calendar': True}, + 'outbound': {'hacked': True}, + } + r = self.client.put( + '/api/cells/office/permissions', + data=json.dumps(body), + content_type='application/json', + ) + self.assertEqual(r.status_code, 400) + + @patch('app.cell_link_manager') + @patch('app.peer_registry') + @patch('app.firewall_manager') + @patch('app.config_manager') + def test_update_permissions_passes_inbound_outbound_to_manager( + self, mock_cfg, mock_fm, mock_pr, mock_clm): + """update_permissions is called with inbound and outbound dicts from the body.""" + mock_cfg.configs = {'_identity': {'domain': 'cell'}} + mock_pr.list_peers.return_value = [] + mock_clm.list_connections.return_value = [] + mock_clm.update_permissions.return_value = { + 'cell_name': 'office', 'permissions': self._VALID_PERM_BODY + } + mock_fm.apply_all_dns_rules.return_value = True + self.client.put( + '/api/cells/office/permissions', + data=json.dumps(self._VALID_PERM_BODY), + content_type='application/json', + ) + mock_clm.update_permissions.assert_called_once_with( + 'office', + self._VALID_PERM_BODY['inbound'], + self._VALID_PERM_BODY['outbound'], + ) + + if __name__ == '__main__': unittest.main() diff --git a/tests/test_config_apply.py b/tests/test_config_apply.py index de7cc42..c15e2f1 100644 --- a/tests/test_config_apply.py +++ b/tests/test_config_apply.py @@ -65,18 +65,22 @@ class TestConfigApplyRoute(unittest.TestCase): data = 
json.loads(r.data) self.assertTrue(data.get('restart_in_progress')) - # ── Pending state cleared after apply ────────────────────────────────── + # ── Pending state marked "applying" after apply (not immediately cleared) ─ @patch('threading.Thread') @patch('docker.from_env') - def test_apply_clears_pending_state(self, mock_docker, mock_thread): + def test_apply_sets_applying_flag(self, mock_docker, mock_thread): mock_docker.side_effect = Exception('no docker in test') # Don't actually start the thread so we don't need subprocess mock_thread.return_value = MagicMock() _set_pending_restart(['config changed'], ['*']) self.client.post('/api/config/apply') pending = config_manager.configs.get('_pending_restart', {}) - self.assertFalse(pending.get('needs_restart', False)) + # The route now marks needs_restart=True + applying=True instead of clearing + # immediately. The helper container clears the flag on success; if the helper + # fails, needs_restart stays set so the UI continues showing pending changes. 
+ self.assertTrue(pending.get('needs_restart', False)) + self.assertTrue(pending.get('applying', False)) # ── needs_network_recreate=True → helper script includes 'down' ──────── diff --git a/tests/test_firewall_manager.py b/tests/test_firewall_manager.py index cae337d..1790267 100644 --- a/tests/test_firewall_manager.py +++ b/tests/test_firewall_manager.py @@ -406,5 +406,221 @@ class TestUpdateServiceIps(unittest.TestCase): self.assertNotIn('172.20.0.21', dest_ips) +# --------------------------------------------------------------------------- +# TestCellRules +# --------------------------------------------------------------------------- + +class TestCellRules(unittest.TestCase): + """Tests for apply_cell_rules, clear_cell_rules, _cell_tag, and apply_all_cell_rules.""" + + # ── helpers ─────────────────────────────────────────────────────────────── + + def _capture_apply(self, cell_name, vpn_subnet, inbound_services): + """Run apply_cell_rules with _wg_exec mocked; return list of captured iptables arg lists.""" + calls_made = [] + + def fake_wg_exec(args): + calls_made.append(args) + m = MagicMock() + m.returncode = 0 + m.stdout = '' + return m + + with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec): + firewall_manager.apply_cell_rules(cell_name, vpn_subnet, inbound_services) + + return [c for c in calls_made if 'iptables' in c] + + def _targets_for_dest(self, iptables_calls, dest_ip): + """Return list of -j targets where -d matches dest_ip.""" + targets = [] + for c in iptables_calls: + if '-d' in c and dest_ip in c and '-j' in c: + targets.append(c[c.index('-j') + 1]) + return targets + + # ── _cell_tag ───────────────────────────────────────────────────────────── + + def test_cell_tag_sanitises_spaces_and_punctuation(self): + """_cell_tag replaces non-alphanumeric chars with dashes.""" + tag = firewall_manager._cell_tag('my cell!') + self.assertTrue(tag.startswith('pic-cell-')) + self.assertNotIn(' ', tag) + self.assertNotIn('!', tag) + + 
def test_cell_tag_lowercase(self): + """_cell_tag lowercases the cell name.""" + tag = firewall_manager._cell_tag('Office') + self.assertIn('office', tag) + + def test_cell_tag_has_pic_cell_prefix(self): + """_cell_tag always starts with 'pic-cell-'.""" + self.assertTrue(firewall_manager._cell_tag('remote').startswith('pic-cell-')) + + def test_cell_tag_distinct_from_peer_tag(self): + """A cell tag must not equal the peer comment for the same string.""" + cell_tag = firewall_manager._cell_tag('10.0.0.2') + peer_tag = firewall_manager._peer_comment('10.0.0.2') + self.assertNotEqual(cell_tag, peer_tag) + + # ── apply_cell_rules — catch-all DROP ───────────────────────────────────── + + def test_apply_cell_rules_sends_catch_all_drop(self): + """apply_cell_rules always inserts a DROP for the entire vpn_subnet.""" + calls = self._capture_apply('office', '10.0.1.0/24', ['calendar']) + subnet_drops = [ + c for c in calls + if '-s' in c and '10.0.1.0/24' in c + and '-j' in c and c[c.index('-j') + 1] == 'DROP' + and '-d' not in c # catch-all has no destination + ] + self.assertTrue(subnet_drops, "Expected a catch-all DROP rule for the subnet") + + def test_apply_cell_rules_sends_accept_for_allowed_service(self): + """apply_cell_rules inserts ACCEPT for the calendar VIP when calendar is in inbound.""" + calls = self._capture_apply('office', '10.0.1.0/24', ['calendar']) + calendar_ip = firewall_manager.SERVICE_IPS['calendar'] + calendar_targets = self._targets_for_dest(calls, calendar_ip) + self.assertIn('ACCEPT', calendar_targets) + + def test_apply_cell_rules_sends_drop_for_disallowed_service(self): + """apply_cell_rules inserts DROP for a service not in inbound_services.""" + calls = self._capture_apply('office', '10.0.1.0/24', ['calendar']) + files_ip = firewall_manager.SERVICE_IPS['files'] + files_targets = self._targets_for_dest(calls, files_ip) + self.assertIn('DROP', files_targets) + + # ── apply_cell_rules — empty inbound (all-deny) ─────────────────────────── + + 
def test_apply_cell_rules_empty_inbound_all_drop(self): + """With inbound_services=[], all per-service rules are DROP.""" + calls = self._capture_apply('office', '10.0.1.0/24', []) + for service, svc_ip in firewall_manager.SERVICE_IPS.items(): + svc_targets = self._targets_for_dest(calls, svc_ip) + self.assertTrue(svc_targets, + f"Expected at least one rule for {service} ({svc_ip})") + self.assertNotIn('ACCEPT', svc_targets, + f"{service} should be DROP when not in inbound_services") + + # ── apply_cell_rules — all inbound (all-accept) ─────────────────────────── + + def test_apply_cell_rules_all_inbound_all_accept(self): + """With all four services in inbound, all per-service rules are ACCEPT.""" + all_services = list(firewall_manager.SERVICE_IPS.keys()) + calls = self._capture_apply('office', '10.0.1.0/24', all_services) + for service, svc_ip in firewall_manager.SERVICE_IPS.items(): + svc_targets = self._targets_for_dest(calls, svc_ip) + self.assertIn('ACCEPT', svc_targets, + f"{service} should be ACCEPT when in inbound_services") + + # ── apply_cell_rules — all rules tagged ─────────────────────────────────── + + def test_apply_cell_rules_all_rules_tagged_with_cell_tag(self): + """Every insertion rule must carry the cell's comment tag.""" + calls = self._capture_apply('office', '10.0.1.0/24', ['calendar']) + tag = firewall_manager._cell_tag('office') + for c in calls: + if '-I' in c: + self.assertIn(tag, c, f"Rule missing cell tag: {c}") + + # ── clear_cell_rules — noop when no matching rules ──────────────────────── + + def test_clear_cell_rules_noop_when_no_rules(self): + """When iptables-save returns no pic-cell-office lines, iptables-restore is NOT called.""" + save_output = '*filter\n:FORWARD ACCEPT [0:0]\nCOMMIT\n' + + def fake_wg_exec(args): + m = MagicMock() + m.returncode = 0 + m.stdout = save_output + return m + + with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \ + patch('subprocess.run') as mock_restore: + 
firewall_manager.clear_cell_rules('office') + + mock_restore.assert_not_called() + + def test_clear_cell_rules_removes_tagged_lines(self): + """clear_cell_rules removes lines carrying the cell tag and keeps others.""" + tag = firewall_manager._cell_tag('office') + save_output = ( + '*filter\n' + ':FORWARD ACCEPT [0:0]\n' + f'-A FORWARD -s 10.0.1.0/24 -m comment --comment "{tag}" -j DROP\n' + '-A FORWARD -s 10.0.0.2 -m comment --comment "pic-peer-10-0-0-2/32" -j ACCEPT\n' + 'COMMIT\n' + ) + restored = [] + + def fake_wg_exec(args): + m = MagicMock() + m.returncode = 0 + if args == ['iptables-save']: + m.stdout = save_output + return m + + def fake_restore(cmd, input, **kwargs): + restored.append(input) + m = MagicMock() + m.returncode = 0 + return m + + with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \ + patch('subprocess.run', side_effect=fake_restore): + firewall_manager.clear_cell_rules('office') + + self.assertEqual(len(restored), 1) + content = restored[0] + self.assertNotIn(tag, content) + # peer rule for a different entity must survive + self.assertIn('pic-peer-10-0-0-2/32', content) + + # ── apply_all_cell_rules ────────────────────────────────────────────────── + + def test_apply_all_cell_rules_calls_apply_for_each(self): + """apply_all_cell_rules calls apply_cell_rules once per link with correct args.""" + cell_links = [ + { + 'cell_name': 'office', + 'vpn_subnet': '10.1.0.0/24', + 'permissions': {'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False}, + 'outbound': {}}, + }, + { + 'cell_name': 'cabin', + 'vpn_subnet': '10.2.0.0/24', + 'permissions': {'inbound': {'calendar': False, 'files': True, 'mail': False, 'webdav': False}, + 'outbound': {}}, + }, + ] + with patch.object(firewall_manager, 'apply_cell_rules', return_value=True) as mock_apply: + firewall_manager.apply_all_cell_rules(cell_links) + + self.assertEqual(mock_apply.call_count, 2) + call_kwargs = {c.args[0]: c.args for c in 
mock_apply.call_args_list} + self.assertIn('office', call_kwargs) + self.assertIn('cabin', call_kwargs) + office_args = call_kwargs['office'] + self.assertEqual(office_args[1], '10.1.0.0/24') + self.assertIn('calendar', office_args[2]) + self.assertNotIn('files', office_args[2]) + + def test_apply_all_cell_rules_skips_links_with_missing_fields(self): + """Links without cell_name or vpn_subnet are silently skipped.""" + cell_links = [ + {'vpn_subnet': '10.1.0.0/24'}, # no cell_name + {'cell_name': 'broken'}, # no vpn_subnet + {'cell_name': 'office', 'vpn_subnet': '10.3.0.0/24', + 'permissions': {'inbound': {}, 'outbound': {}}}, + ] + with patch.object(firewall_manager, 'apply_cell_rules', return_value=True) as mock_apply: + firewall_manager.apply_all_cell_rules(cell_links) + + # Only the complete entry should be processed + self.assertEqual(mock_apply.call_count, 1) + self.assertEqual(mock_apply.call_args.args[0], 'office') + + if __name__ == '__main__': unittest.main() diff --git a/tests/test_peer_management_edge_cases.py b/tests/test_peer_management_edge_cases.py index 2a9d203..e8f347a 100644 --- a/tests/test_peer_management_edge_cases.py +++ b/tests/test_peer_management_edge_cases.py @@ -31,7 +31,7 @@ class TestAddPeerSubnetExhaustion(unittest.TestCase): app.config['TESTING'] = True self.client = app.test_client() - @patch('app._next_peer_ip') + @patch('routes.peers._next_peer_ip') @patch('app.auth_manager') def test_add_peer_returns_409_when_subnet_exhausted(self, mock_auth, mock_next_ip): mock_auth.create_user.return_value = True @@ -50,7 +50,7 @@ class TestAddPeerSubnetExhaustion(unittest.TestCase): data = json.loads(r.data) self.assertIn('error', data) - @patch('app._next_peer_ip') + @patch('routes.peers._next_peer_ip') @patch('app.auth_manager') def test_add_peer_409_error_message_mentions_ip(self, mock_auth, mock_next_ip): mock_auth.create_user.return_value = True diff --git a/tests/test_peer_provisioning.py b/tests/test_peer_provisioning.py index 
ba323f5..d579513 100644 --- a/tests/test_peer_provisioning.py +++ b/tests/test_peer_provisioning.py @@ -370,3 +370,50 @@ def test_delete_nonexistent_peer_returns_gracefully(admin_client, mock_peer_regi r = _delete_peer(admin_client, 'nobody') # Route must not 500 when the peer simply doesn't exist assert r.status_code in (200, 404) + + +# ── POST /api/peers — firewall rollback (A3) ────────────────────────────────── + +def test_create_peer_rolls_back_firewall_on_dns_failure( + auth_mgr, mock_email_mgr, mock_calendar_mgr, + mock_file_mgr, mock_wg_mgr, mock_peer_registry): + """If apply_all_dns_rules raises after firewall rules were applied, the peer + add must call clear_peer_rules to undo the firewall state (A3 fix).""" + app.config['TESTING'] = True + app.config['SECRET_KEY'] = 'test-secret' + + mock_fw = MagicMock() + mock_fw.apply_peer_rules.return_value = True + mock_fw.apply_all_dns_rules.side_effect = RuntimeError('CoreDNS unreachable') + + patches = [ + patch('app.auth_manager', auth_mgr), + patch('app.email_manager', mock_email_mgr), + patch('app.calendar_manager', mock_calendar_mgr), + patch('app.file_manager', mock_file_mgr), + patch('app.wireguard_manager', mock_wg_mgr), + patch('app.peer_registry', mock_peer_registry), + patch('app.firewall_manager', mock_fw), + ] + try: + import auth_routes + patches.append(patch.object(auth_routes, 'auth_manager', auth_mgr, create=True)) + except (ImportError, AttributeError): + pass + + started = [p.start() for p in patches] + try: + with app.test_client() as client: + r = _login(client) + assert r.status_code == 200 + resp = _post_peer(client) + assert resp.status_code == 500, ( + f'expected 500 on DNS failure but got {resp.status_code}' + ) + # Firewall rules must be cleared as part of rollback + mock_fw.clear_peer_rules.assert_called_once() + # Registry entry must also be rolled back + mock_peer_registry.remove_peer.assert_called_once() + finally: + for p in patches: + p.stop() diff --git 
a/tests/test_wireguard_endpoints.py b/tests/test_wireguard_endpoints.py index 17e55a3..817e144 100644 --- a/tests/test_wireguard_endpoints.py +++ b/tests/test_wireguard_endpoints.py @@ -238,7 +238,7 @@ class TestWireGuardPortPropagation(unittest.TestCase): app.config['TESTING'] = True self.client = app.test_client() - @patch('app._set_pending_restart') + @patch('routes.config._set_pending_restart') @patch('app.wireguard_manager') @patch('app.config_manager') def test_wireguard_port_identity_change_calls_apply_config( @@ -263,7 +263,7 @@ class TestWireGuardPortPropagation(unittest.TestCase): self.assertEqual(r.status_code, 200) mock_wg.apply_config.assert_called_once_with({'port': 51821}) - @patch('app._set_pending_restart') + @patch('routes.config._set_pending_restart') @patch('app.wireguard_manager') @patch('app.config_manager') def test_wireguard_port_same_value_does_not_call_apply_config( @@ -305,7 +305,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase): app.config['TESTING'] = True self.client = app.test_client() - @patch('app._clear_pending_restart') + @patch('routes.config._clear_pending_restart') @patch('app.config_manager') def test_apply_pending_uses_force_recreate(self, mock_cm, mock_clear): """apply_pending_config for specific containers must include --force-recreate.""" @@ -325,7 +325,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase): t.start = lambda: None return t - with patch('app.threading.Thread', side_effect=patched_thread): + with patch('routes.config.threading.Thread', side_effect=patched_thread): r = self.client.post('/api/config/apply') self.assertEqual(r.status_code, 200) @@ -344,7 +344,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase): f'--force-recreate missing from docker compose command: {cmd}') self.assertIn('wireguard', cmd) - @patch('app._clear_pending_restart') + @patch('routes.config._clear_pending_restart') @patch('app.config_manager') def 
test_apply_pending_all_services_no_force_recreate(self, mock_cm, mock_clear): """All-services restart ('*') uses a helper container (Popen), not subprocess.run.""" @@ -364,7 +364,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase): t.start = lambda: None return t - with patch('app.threading.Thread', side_effect=patched_thread): + with patch('routes.config.threading.Thread', side_effect=patched_thread): r = self.client.post('/api/config/apply') self.assertEqual(r.status_code, 200) diff --git a/tests/test_wireguard_manager.py b/tests/test_wireguard_manager.py index 50d5429..e2152c5 100644 --- a/tests/test_wireguard_manager.py +++ b/tests/test_wireguard_manager.py @@ -629,5 +629,80 @@ class TestWireGuardSysctlAndPortCheck(unittest.TestCase): self.assertEqual(statuses, {}) +class TestAddCellPeerSubnetOverlap(unittest.TestCase): + """Verify that add_cell_peer rejects a vpn_subnet that overlaps the local WG network.""" + + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.data_dir = os.path.join(self.test_dir, 'data') + self.config_dir = os.path.join(self.test_dir, 'config') + os.makedirs(self.data_dir, exist_ok=True) + os.makedirs(self.config_dir, exist_ok=True) + patcher = patch.object(WireGuardManager, '_syncconf', return_value=None) + self.mock_sync = patcher.start() + self.addCleanup(patcher.stop) + self.wg = WireGuardManager(self.data_dir, self.config_dir) + # Write a known wg0.conf so _get_configured_network() returns 10.0.0.0/24 + self._write_wg_conf(address='10.0.0.1/24') + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def _write_wg_conf(self, address='10.0.0.1/24', port=51820): + conf = ( + f'[Interface]\n' + f'PrivateKey = dummykey\n' + f'Address = {address}\n' + f'ListenPort = {port}\n' + ) + cf = self.wg._config_file() + os.makedirs(os.path.dirname(cf), exist_ok=True) + with open(cf, 'w') as f: + f.write(conf) + + # Public key is 44 chars ending in '=' — required by validation in add_cell_peer + _CELL_PUBKEY = 
'cmVtb3RlcHVia2V5X2Zvcl90ZXN0c193Z3Rlc3QxMiE=' + + def test_add_cell_peer_overlapping_subnet_returns_false(self): + """vpn_subnet that exactly matches the local WG network must be rejected.""" + # local is 10.0.0.0/24; remote is also 10.0.0.0/24 — clear overlap + ok = self.wg.add_cell_peer( + 'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.0.0/24' + ) + self.assertFalse(ok) + + def test_add_cell_peer_partially_overlapping_subnet_returns_false(self): + """A remote subnet that contains the local network (e.g. /16 ⊃ /24) is rejected.""" + # 10.0.0.0/16 contains 10.0.0.0/24 → overlaps + ok = self.wg.add_cell_peer( + 'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.0.0/16' + ) + self.assertFalse(ok) + + def test_add_cell_peer_non_overlapping_subnet_accepted(self): + """A remote subnet distinct from the local WG network must be accepted.""" + # local is 10.0.0.0/24; remote is 10.0.1.0/24 — no overlap + ok = self.wg.add_cell_peer( + 'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.1.0/24' + ) + self.assertTrue(ok) + + def test_add_cell_peer_no_overlap_different_class_a(self): + """A completely different address space is accepted.""" + # local is 10.0.0.0/24; remote is 192.168.5.0/24 — no overlap + ok = self.wg.add_cell_peer( + 'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '192.168.5.0/24' + ) + self.assertTrue(ok) + + def test_add_cell_peer_overlap_check_uses_configured_network(self): + """When wg0.conf says 172.16.0.1/12, overlapping that range is rejected.""" + self._write_wg_conf(address='172.16.0.1/12') + ok = self.wg.add_cell_peer( + 'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '172.16.0.0/12' + ) + self.assertFalse(ok) + + if __name__ == '__main__': unittest.main() \ No newline at end of file diff --git a/webui/src/pages/CellNetwork.jsx b/webui/src/pages/CellNetwork.jsx index ba0e43b..ca1b8cc 100644 --- a/webui/src/pages/CellNetwork.jsx +++ b/webui/src/pages/CellNetwork.jsx @@ -1,9 +1,25 @@ import { useState, useEffect } from 'react'; -import { Link2, 
Link2Off, Copy, CheckCheck, RefreshCw, Plug, Unplug, Globe, Wifi } from 'lucide-react'; +import { Link2, Link2Off, Copy, CheckCheck, RefreshCw, Plug, Unplug, Globe, Wifi, Calendar, FolderOpen, Mail, HardDrive, ChevronDown, ChevronRight } from 'lucide-react'; import { cellLinkAPI } from '../services/api'; import { useConfig } from '../contexts/ConfigContext'; import QRCode from 'qrcode'; +const relativeTime = (ts) => { + if (!ts) return null; + const diff = Math.floor((Date.now() / 1000) - (typeof ts === 'string' ? new Date(ts).getTime() / 1000 : ts)); + if (diff < 60) return `${diff}s ago`; + if (diff < 3600) return `${Math.floor(diff / 60)}m ago`; + if (diff < 86400) return `${Math.floor(diff / 3600)}h ago`; + return `${Math.floor(diff / 86400)}d ago`; +}; + +const SERVICE_DEFS = [ + { key: 'calendar', label: 'Calendar', Icon: Calendar }, + { key: 'files', label: 'Files', Icon: FolderOpen }, + { key: 'mail', label: 'Mail', Icon: Mail }, + { key: 'webdav', label: 'WebDAV', Icon: HardDrive }, +]; + function CopyButton({ text, small }) { const [copied, setCopied] = useState(false); const copy = () => { @@ -52,6 +68,194 @@ function useToasts() { return [toasts, add]; } +function DisconnectConfirmModal({ cellName, onConfirm, onCancel }) { + return ( +
+
+
+ +

Disconnect "{cellName}"?

+
+

+ This will remove the WireGuard tunnel and all sharing permissions between your cells. +

+

+ The other cell's admin will need to remove the connection on their end too. Shared services will become inaccessible immediately. +

+
+ + +
+
+
+ ); +} + +function ServiceShareToggle({ serviceKey, label, Icon, enabled, saving, onChange }) { + return ( + + ); +} + +function InboundServiceBadge({ label, Icon, active }) { + return ( + + + {label} + + ); +} + +function CellPanel({ conn, onDisconnect, addToast }) { + const [open, setOpen] = useState(false); + const [inboundPerms, setInboundPerms] = useState(conn.permissions?.inbound || {}); + const [saving, setSaving] = useState({}); + const [confirmDisconnect, setConfirmDisconnect] = useState(false); + + const handleToggle = async (serviceKey, newValue) => { + setSaving(s => ({ ...s, [serviceKey]: true })); + const newInbound = { ...inboundPerms, [serviceKey]: newValue }; + try { + await cellLinkAPI.updatePermissions(conn.cell_name, newInbound, conn.permissions?.outbound || {}); + setInboundPerms(newInbound); + addToast(`${serviceKey} sharing ${newValue ? 'enabled' : 'disabled'}`, 'success'); + } catch { + addToast('Failed to save sharing permission', 'error'); + } finally { + setSaving(s => ({ ...s, [serviceKey]: false })); + } + }; + + const hasRevokedService = Object.values(inboundPerms).some(v => !v); + + return ( + <> + {confirmDisconnect && ( + { setConfirmDisconnect(false); onDisconnect(conn.cell_name); }} + onCancel={() => setConfirmDisconnect(false)} + /> + )} +
+ + + {open && ( +
+
+
+

+ I share with {conn.cell_name} +

+
+ {SERVICE_DEFS.map(({ key, label, Icon }) => ( + handleToggle(key, v)} + /> + ))} +
+ {hasRevokedService && ( +

+ Services you stop sharing become unreachable from {conn.cell_name} immediately. +

+ )} +
+
+

+ {conn.cell_name} shares with me +

+ {(conn.permissions?.outbound && Object.values(conn.permissions.outbound).some(Boolean)) ? ( +
+ {SERVICE_DEFS.map(({ key, label, Icon }) => ( + + ))} +
+ ) : ( +

Nothing shared yet.

+ )} +

+ Inbound sharing is set by the other cell's admin. +

+
+
+
+
+ {conn.vpn_subnet &&
Subnet
{conn.vpn_subnet}
} + {conn.endpoint &&
Endpoint
{conn.endpoint}
} +
+ +
+
+ )} +
+ + ); +} + export default function CellNetwork() { const { cell_name = 'mycell', domain = 'cell' } = useConfig(); const [toasts, addToast] = useToasts(); @@ -64,6 +268,7 @@ export default function CellNetwork() { const [connsLoading, setConnsLoading] = useState(true); const [pasteText, setPasteText] = useState(''); + const [pasteError, setPasteError] = useState(''); const [connecting, setConnecting] = useState(false); useEffect(() => { @@ -89,17 +294,29 @@ export default function CellNetwork() { setConnsLoading(true); try { const r = await cellLinkAPI.listConnections(); - // Enrich with live status - const enriched = await Promise.all( - (r.data || []).map(async (conn) => { - try { - const s = await cellLinkAPI.getStatus(conn.cell_name); - return { ...conn, online: s.data.online, last_handshake: s.data.last_handshake }; - } catch { - return { ...conn, online: false }; - } - }) - ); + const conns = r.data || []; + + // Fetch all WireGuard peer statuses in one call and index by public_key + let statusByKey = {}; + try { + const { wireguardAPI } = await import('../services/api'); + const sr = await wireguardAPI.getPeerStatuses(); + (sr.data?.peers || []).forEach(p => { + if (p.public_key) statusByKey[p.public_key] = p; + }); + } catch { + // Status enrichment is best-effort; continue without it + } + + const enriched = conns.map(conn => { + const wg = conn.public_key ? statusByKey[conn.public_key] : null; + return { + ...conn, + online: wg ? wg.online : false, + last_handshake: wg ? 
wg.last_handshake : null, + }; + }); + setConnections(enriched); } catch { addToast('Failed to load connections', 'error'); @@ -108,6 +325,20 @@ export default function CellNetwork() { } }; + const validatePaste = (text) => { + if (!text.trim()) { setPasteError(''); return; } + try { + const p = JSON.parse(text.trim()); + if (!p.cell_name || !p.public_key || !p.vpn_subnet) { + setPasteError('JSON is missing required fields (cell_name, public_key, vpn_subnet)'); + } else { + setPasteError(''); + } + } catch { + setPasteError('Not valid JSON — paste the complete invite from the other cell'); + } + }; + const handleConnect = async () => { if (!pasteText.trim()) return; let parsed; @@ -122,6 +353,7 @@ export default function CellNetwork() { await cellLinkAPI.addConnection(parsed); addToast(`Connected to cell "${parsed.cell_name}"`); setPasteText(''); + setPasteError(''); loadConnections(); } catch (e) { addToast(e?.response?.data?.error || 'Connection failed', 'error'); @@ -130,8 +362,8 @@ export default function CellNetwork() { } }; + // Confirmation is handled inside CellPanel via DisconnectConfirmModal const handleDisconnect = async (name) => { - if (!window.confirm(`Disconnect from cell "${name}"?`)) return; try { await cellLinkAPI.removeConnection(name); addToast(`Disconnected from "${name}"`); @@ -230,16 +462,22 @@ export default function CellNetwork() {

Paste the invite JSON from the other cell's "Your Cell's Invite" panel:

-