Merge feature/security-fixes-and-qa into main

Includes 6 commits:
- PIC-to-PIC cell connection fixes (subnet overlap, WG reload, FORWARD rules, DNS)
- Service-sharing permissions backend (cell_links.json, /api/cells/permissions)
- Flask blueprint refactor (app.py -1735 lines extracted)
- Peer add rollback + manager extraction (P2 arch debt)
- VPN key sync on startup, DNS zone hostname/SOA fix, peer status UI
- 50 new tests (1071 total) + CellNetwork.jsx UI for service-sharing

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-05-01 11:09:34 -04:00
33 changed files with 4325 additions and 2926 deletions
+80 -2830
View File
File diff suppressed because it is too large Load Diff
+90 -3
View File
@@ -5,6 +5,7 @@ CellLinkManager — manages site-to-site connections between PIC cells.
Each connection is stored in data/cell_links.json and manifests as:
- A WireGuard [Peer] block (AllowedIPs = remote cell's VPN subnet)
- A CoreDNS forwarding block (remote domain -> remote cell's DNS IP)
- An iptables FORWARD rule set (service-level access control)
"""
import os
@@ -15,6 +16,20 @@ from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
VALID_SERVICES = ('calendar', 'files', 'mail', 'webdav')
_DEFAULT_PERMISSIONS = {
'inbound': {s: False for s in VALID_SERVICES},
'outbound': {s: False for s in VALID_SERVICES},
}
def _default_perms() -> Dict[str, Any]:
return {
'inbound': {s: False for s in VALID_SERVICES},
'outbound': {s: False for s in VALID_SERVICES},
}
class CellLinkManager:
def __init__(self, data_dir: str, config_dir: str, wireguard_manager, network_manager):
@@ -30,7 +45,16 @@ class CellLinkManager:
if os.path.exists(self.links_file):
try:
with open(self.links_file) as f:
return json.load(f)
links = json.load(f)
# Lazy migration: inject permissions field if missing
changed = False
for link in links:
if 'permissions' not in link:
link['permissions'] = _default_perms()
changed = True
if changed:
self._save(links)
return links
except Exception:
return []
return []
@@ -59,8 +83,13 @@ class CellLinkManager:
def list_connections(self) -> List[Dict[str, Any]]:
    """Return all persisted cell links, re-read from the links file on every call."""
    return self._load()
def add_connection(self, invite: Dict[str, Any]) -> Dict[str, Any]:
"""Import a remote cell's invite and establish the connection."""
def add_connection(self, invite: Dict[str, Any],
inbound_services: Optional[List[str]] = None) -> Dict[str, Any]:
"""Import a remote cell's invite and establish the connection.
inbound_services: which of THIS cell's services to share with the remote
cell immediately. Defaults to none (all-deny).
"""
links = self._load()
name = invite['cell_name']
if any(l['cell_name'] == name for l in links):
@@ -82,6 +111,11 @@ class CellLinkManager:
if dns_result.get('warnings'):
logger.warning('DNS forward warnings for %s: %s', name, dns_result['warnings'])
inbound = [s for s in (inbound_services or []) if s in VALID_SERVICES]
perms = _default_perms()
for s in inbound:
perms['inbound'][s] = True
link = {
'cell_name': name,
'public_key': invite['public_key'],
@@ -90,9 +124,18 @@ class CellLinkManager:
'dns_ip': invite['dns_ip'],
'domain': invite['domain'],
'connected_at': datetime.utcnow().isoformat(),
'permissions': perms,
}
links.append(link)
self._save(links)
# Apply iptables rules for the new cell (non-fatal if it fails)
try:
import firewall_manager as _fm
_fm.apply_cell_rules(name, invite['vpn_subnet'], inbound)
except Exception as e:
logger.warning(f"apply_cell_rules for {name} failed (non-fatal): {e}")
return link
def remove_connection(self, cell_name: str):
@@ -102,12 +145,56 @@ class CellLinkManager:
if not link:
raise ValueError(f"Cell '{cell_name}' not found")
# Clear firewall rules first (non-fatal)
try:
import firewall_manager as _fm
_fm.clear_cell_rules(cell_name)
except Exception as e:
logger.warning(f"clear_cell_rules for {cell_name} failed (non-fatal): {e}")
self.wireguard_manager.remove_peer(link['public_key'])
self.network_manager.remove_cell_dns_forward(link['domain'])
links = [l for l in links if l['cell_name'] != cell_name]
self._save(links)
def update_permissions(self, cell_name: str,
                       inbound: Dict[str, bool],
                       outbound: Dict[str, bool]) -> Dict[str, Any]:
    """Update service sharing permissions for a cell connection.

    Validates service names, persists the new permissions, then re-applies
    the iptables rules for the cell. Returns the updated link record.

    Raises ValueError if no link exists for cell_name.
    """
    all_links = self._load()
    target = None
    for candidate in all_links:
        if candidate['cell_name'] == cell_name:
            target = candidate
            break
    if not target:
        raise ValueError(f"Cell '{cell_name}' not found")
    # Normalise to a fixed shape: every known service present, values boolean;
    # unknown service keys in the input are simply ignored.
    normalised = {
        'inbound': {s: bool(inbound.get(s, False)) for s in VALID_SERVICES},
        'outbound': {s: bool(outbound.get(s, False)) for s in VALID_SERVICES},
    }
    target['permissions'] = normalised
    self._save(all_links)
    # Re-apply firewall rules for the services now enabled inbound.
    enabled_inbound = [s for s, on in normalised['inbound'].items() if on]
    try:
        import firewall_manager as _fm
        _fm.apply_cell_rules(cell_name, target['vpn_subnet'], enabled_inbound)
    except Exception as e:
        logger.warning(f"apply_cell_rules for {cell_name} failed (non-fatal): {e}")
    return target
def get_permissions(self, cell_name: str) -> Dict[str, Any]:
    """Return the permissions dict for a connected cell.

    Falls back to an all-deny default for links persisted before the
    permissions field existed. Raises ValueError if the cell is unknown.
    """
    for link in self._load():
        if link['cell_name'] == cell_name:
            return link.get('permissions', _default_perms())
    raise ValueError(f"Cell '{cell_name}' not found")
def get_connection_status(self, cell_name: str) -> Dict[str, Any]:
"""Return link record enriched with live WireGuard handshake status."""
links = self._load()
+28
View File
@@ -235,6 +235,20 @@ class ConfigManager:
for zone_file in dns_data.glob('*.zone'):
shutil.copy2(zone_file, zones_dir / zone_file.name)
# Service-specific user account files (authoritative source of truth —
# cell_config.json only carries a best-effort sync of these).
svc_user_files = [
(data_dir / 'email' / 'users.json', 'email_users.json'),
(data_dir / 'calendar' / 'users.json', 'calendar_users.json'),
(data_dir / 'calendar' / 'calendars.json', 'calendar_calendars.json'),
]
for src, dest_name in svc_user_files:
if src.exists():
try:
shutil.copy2(src, backup_path / dest_name)
except (PermissionError, OSError) as e:
logger.warning(f"Could not back up {src.name}: {e} (skipping)")
services = ['identity'] + list(self.service_schemas.keys())
manifest = {
"backup_id": backup_id,
@@ -316,6 +330,20 @@ class ConfigManager:
except (PermissionError, OSError) as dir_err:
logger.warning(f"Could not create dns data dir {dns_data}: {dir_err} (skipping)")
# Service-specific user account files
svc_restore_map = [
(backup_path / 'email_users.json', data_dir / 'email' / 'users.json'),
(backup_path / 'calendar_users.json', data_dir / 'calendar' / 'users.json'),
(backup_path / 'calendar_calendars.json', data_dir / 'calendar' / 'calendars.json'),
]
for src, dest in svc_restore_map:
if src.exists():
try:
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(src, dest)
except (PermissionError, OSError) as e:
logger.warning(f"Could not restore {dest.name}: {e} (skipping)")
self.configs = self._load_all_configs()
logger.info(f"Restored configuration from backup: {backup_id}")
return True
+77
View File
@@ -221,6 +221,83 @@ def apply_all_peer_rules(peers: List[Dict[str, Any]]) -> None:
})
# ---------------------------------------------------------------------------
# Cell-to-cell firewall rules
# ---------------------------------------------------------------------------
def _cell_tag(cell_name: str) -> str:
"""iptables comment tag for cell rules — distinct prefix from pic-peer-* to prevent collision."""
safe = re.sub(r'[^a-z0-9]', '-', cell_name.lower())
return f'pic-cell-{safe}'
def clear_cell_rules(cell_name: str) -> None:
    """Remove all FORWARD rules tagged for this cell (atomic save/restore).

    Instead of deleting rules one by one with `iptables -D`, the full
    ruleset is dumped via iptables-save, filtered in memory, and loaded
    back with iptables-restore — a single atomic swap that removes every
    rule carrying this cell's comment tag, however many there are.
    Failures are logged and swallowed (best effort).
    """
    tag = _cell_tag(cell_name)
    # Matches the tag as an optionally-quoted --comment argument.
    comment_re = re.compile(rf'--comment\s+["\']?{re.escape(tag)}["\']?(\s|$)')
    try:
        # NOTE(review): _wg_exec presumably runs the command inside the
        # WireGuard container (the restore below uses `docker exec` on
        # WIREGUARD_CONTAINER) — confirm against its definition.
        save = _wg_exec(['iptables-save'])
        if save.returncode != 0:
            return
        lines = save.stdout.splitlines()
        filtered = [l for l in lines if not comment_re.search(l)]
        if len(filtered) == len(lines):
            # Nothing tagged for this cell — skip the no-op restore.
            return
        restore_input = '\n'.join(filtered) + '\n'
        restore = subprocess.run(
            ['docker', 'exec', '-i', WIREGUARD_CONTAINER, 'iptables-restore'],
            input=restore_input, capture_output=True, text=True, timeout=10
        )
        if restore.returncode != 0:
            logger.warning(f"clear_cell_rules iptables-restore failed: {restore.stderr.strip()}")
    except Exception as e:
        logger.error(f"clear_cell_rules({cell_name}): {e}")
def apply_cell_rules(cell_name: str, vpn_subnet: str, inbound_services: List[str]) -> bool:
    """Apply FORWARD rules for a cell-to-cell peer.

    Traffic from vpn_subnet is allowed only to the service VIPs listed in
    inbound_services; all other cell traffic is DROPped. Cells get no
    internet or peer access — only the explicitly shared service VIPs.

    Rule insertion order (last inserted = top of chain):
      1. Catch-all DROP for the subnet (inserted first = bottom)
      2. Per-service ACCEPT/DROP (inserted in reversed() order = top)

    Returns True on success, False if rule application failed.
    """
    try:
        tag = _cell_tag(cell_name)
        # Start from a clean slate so re-applying never duplicates rules.
        clear_cell_rules(cell_name)
        # Catch-all DROP — inserted first so it ends up at the bottom
        _iptables(['-I', 'FORWARD', '-s', vpn_subnet,
                   '-m', 'comment', '--comment', tag, '-j', 'DROP'])
        # Per-service rules — inserted in reverse dict order, highest-priority last
        for service, svc_ip in reversed(list(SERVICE_IPS.items())):
            target = 'ACCEPT' if service in inbound_services else 'DROP'
            _iptables(['-I', 'FORWARD', '-s', vpn_subnet, '-d', svc_ip,
                       '-m', 'comment', '--comment', tag, '-j', target])
        logger.info(f"Applied cell rules for {cell_name} ({vpn_subnet}): inbound={inbound_services}")
        return True
    except Exception as e:
        logger.error(f"apply_cell_rules({cell_name}): {e}")
        return False
def apply_all_cell_rules(cell_links: List[Dict[str, Any]]) -> None:
    """Re-apply firewall rules for every persisted cell connection.

    Called on startup to reinstate rules. Entries missing a cell name or
    VPN subnet are skipped silently.
    """
    for entry in cell_links:
        cell = entry.get('cell_name')
        cidr = entry.get('vpn_subnet')
        if cell and cidr:
            granted = [
                svc for svc, allowed
                in entry.get('permissions', {}).get('inbound', {}).items()
                if allowed
            ]
            apply_cell_rules(cell, cidr, granted)
# ---------------------------------------------------------------------------
# DNS ACL (CoreDNS Corefile generation)
# ---------------------------------------------------------------------------
+92
View File
@@ -0,0 +1,92 @@
"""
Manager singletons for the PIC API.
All service managers are instantiated here and imported by app.py. Routes in
app.py reference these by name from app's own namespace (so test patches via
`patch('app.email_manager', mock)` continue to work as before).
Directory/path env vars:
DATA_DIR host-mapped persistent data directory (default: /app/data)
CONFIG_DIR host-mapped config directory (default: /app/config)
"""
import os
from network_manager import NetworkManager
from wireguard_manager import WireGuardManager
from peer_registry import PeerRegistry
from email_manager import EmailManager
from calendar_manager import CalendarManager
from file_manager import FileManager
from routing_manager import RoutingManager
from vault_manager import VaultManager
from container_manager import ContainerManager
from config_manager import ConfigManager
from service_bus import ServiceBus, EventType
from log_manager import LogManager
from cell_link_manager import CellLinkManager
import firewall_manager
from auth_manager import AuthManager
DATA_DIR = os.environ.get('DATA_DIR', '/app/data')
CONFIG_DIR = os.environ.get('CONFIG_DIR', '/app/config')
config_manager = ConfigManager(
config_file=os.path.join(CONFIG_DIR, 'cell_config.json'),
data_dir=DATA_DIR,
)
service_bus = ServiceBus()
log_manager = LogManager(log_dir='./data/logs')
network_manager = NetworkManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
wireguard_manager = WireGuardManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
peer_registry = PeerRegistry(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
email_manager = EmailManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
calendar_manager = CalendarManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
file_manager = FileManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
routing_manager = RoutingManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
vault_manager = VaultManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
container_manager = ContainerManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
cell_link_manager = CellLinkManager(
data_dir=DATA_DIR, config_dir=CONFIG_DIR,
wireguard_manager=wireguard_manager,
network_manager=network_manager,
)
auth_manager = AuthManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
# Service logger configuration
_service_log_configs = {
'network': {'level': 'INFO', 'formatter': 'json', 'console': False},
'wireguard': {'level': 'INFO', 'formatter': 'json', 'console': False},
'email': {'level': 'INFO', 'formatter': 'json', 'console': False},
'calendar': {'level': 'INFO', 'formatter': 'json', 'console': False},
'files': {'level': 'INFO', 'formatter': 'json', 'console': False},
'routing': {'level': 'INFO', 'formatter': 'json', 'console': False},
'vault': {'level': 'INFO', 'formatter': 'json', 'console': False},
'api': {'level': 'INFO', 'formatter': 'json', 'console': True},
}
for _svc, _cfg in _service_log_configs.items():
log_manager.add_service_logger(_svc, _cfg)
# Apply any persisted log level overrides
import json as _json
_levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json')
if os.path.exists(_levels_file):
try:
with open(_levels_file) as _lf:
for _s, _l in _json.load(_lf).items():
log_manager.set_service_level(_s, _l)
except Exception:
pass
service_bus.start()
__all__ = [
'config_manager', 'service_bus', 'log_manager',
'network_manager', 'wireguard_manager', 'peer_registry',
'email_manager', 'calendar_manager', 'file_manager',
'routing_manager', 'vault_manager', 'container_manager',
'cell_link_manager', 'auth_manager',
'firewall_manager', 'EventType',
'DATA_DIR', 'CONFIG_DIR',
]
+34 -7
View File
@@ -476,10 +476,16 @@ class NetworkManager(BaseServiceManager):
if os.path.exists(src):
with open(src) as f:
zone_content = f.read()
# Try $ORIGIN first, then fall back to SOA MNAME
m = re.search(r'^\$ORIGIN\s+(\S+)', zone_content, re.MULTILINE)
old_origin = m.group(1).rstrip('.') if m else None
if m:
old_origin = m.group(1).rstrip('.')
else:
m2 = re.search(r'^@\s+IN\s+SOA\s+(\S+?)\.?\s', zone_content, re.MULTILINE)
old_origin = m2.group(1).rstrip('.') if m2 else None
if old_origin and old_origin != domain:
zone_content = zone_content.replace(f'{old_origin}.', f'{domain}.')
if re.search(r'^\$ORIGIN\s+', zone_content, re.MULTILINE):
zone_content = re.sub(
r'^\$ORIGIN\s+\S+', f'$ORIGIN {domain}.', zone_content, flags=re.MULTILINE)
with open(dst, 'w') as f:
@@ -507,11 +513,15 @@ class NetworkManager(BaseServiceManager):
"""Update the cell hostname record in the primary DNS zone file.
reload=False writes the zone file only use when deferring container restart.
old_name is a hint; if it's absent from the zone file, we detect the actual
hostname by finding the non-service A record pointing to the Caddy IP.
"""
restarted = []
warnings = []
if not old_name or not new_name or old_name == new_name:
if not new_name:
return {'restarted': restarted, 'warnings': warnings}
_service_names = {'api', 'webui', 'calendar', 'files', 'mail', 'webmail', 'webdav'}
changed = False
try:
dns_data = os.path.join(self.data_dir, 'dns')
if os.path.isdir(dns_data):
@@ -520,16 +530,33 @@ class NetworkManager(BaseServiceManager):
zone_file = os.path.join(dns_data, fname)
with open(zone_file) as f:
content = f.read()
# Match name with optional TTL: "name [ttl] IN A value"
content = re.sub(
rf'^{re.escape(old_name)}(\s+(?:\d+\s+)?IN\s+A\s+)',
# Determine which name to replace: prefer old_name if present,
# otherwise detect from zone (non-service A record not in _service_names)
actual_old = old_name if (
old_name and re.search(
rf'^{re.escape(old_name)}\s', content, re.MULTILINE)
) else None
if actual_old is None:
for m in re.finditer(
r'^(\S+)\s+(?:\d+\s+)?IN\s+A\s+\S+', content, re.MULTILINE
):
candidate = m.group(1)
if candidate not in _service_names and candidate != '@':
actual_old = candidate
break
if actual_old is None or actual_old == new_name:
break
new_content = re.sub(
rf'^{re.escape(actual_old)}(\s+(?:\d+\s+)?IN\s+A\s+)',
f'{new_name}\\1',
content, flags=re.MULTILINE
)
if new_content != content:
with open(zone_file, 'w') as f:
f.write(content)
f.write(new_content)
changed = True
break
if reload:
if changed and reload:
self._reload_dns_service()
restarted.append('cell-dns (reloaded)')
except Exception as e:
View File
+119
View File
@@ -0,0 +1,119 @@
import logging
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
bp = Blueprint('calendar', __name__)
@bp.route('/api/calendar/users', methods=['GET'])
def get_calendar_users():
    """Return the list of calendar users as JSON."""
    try:
        from app import calendar_manager
        return jsonify(calendar_manager.get_users())
    except Exception as e:
        logger.error(f"Error getting calendar users: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/users', methods=['POST'])
def create_calendar_user():
    """Create a calendar user from a JSON body requiring username and password."""
    try:
        from app import calendar_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        password = payload.get('password')
        if not (username and password):
            return jsonify({"error": "Missing required fields: username, password"}), 400
        created = calendar_manager.create_calendar_user(username, password)
        return jsonify({"created": created})
    except Exception as e:
        logger.error(f"Error creating calendar user: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/users/<username>', methods=['DELETE'])
def delete_calendar_user(username):
    """Delete the named calendar user."""
    try:
        from app import calendar_manager
        deleted = calendar_manager.delete_calendar_user(username)
        return jsonify({"deleted": deleted})
    except Exception as e:
        logger.error(f"Error deleting calendar user: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/calendars', methods=['POST'])
def create_calendar():
    """Create a calendar for a user; accepts 'name' or legacy 'calendar_name'."""
    try:
        from app import calendar_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        calendar_name = payload.get('name') or payload.get('calendar_name')
        if not (username and calendar_name):
            return jsonify({"error": "Missing required fields: username, name"}), 400
        created = calendar_manager.create_calendar(
            username, calendar_name,
            description=payload.get('description', ''),
            color=payload.get('color', '#4285f4'),
        )
        return jsonify({"created": created})
    except Exception as e:
        logger.error(f"Error creating calendar: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/events', methods=['POST'])
def add_calendar_event():
    """Add an event; all body keys except the routing fields form the event data."""
    try:
        from app import calendar_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        calendar_name = payload.get('calendar_name') or payload.get('calendar')
        if not (username and calendar_name):
            return jsonify({"error": "Missing required fields: username, calendar_name"}), 400
        # Strip the routing fields; what remains is the event payload itself.
        routing = ('username', 'calendar_name', 'calendar')
        event_data = {k: v for k, v in payload.items() if k not in routing}
        created = calendar_manager.add_event(username, calendar_name, event_data)
        return jsonify({"created": created})
    except Exception as e:
        logger.error(f"Error adding calendar event: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/events/<username>/<calendar_name>', methods=['GET'])
def get_calendar_events(username, calendar_name):
    """List events for a user's calendar; query args pass through as filters."""
    try:
        from app import calendar_manager
        filters = request.args.to_dict()
        return jsonify(calendar_manager.get_events(username, calendar_name, filters))
    except Exception as e:
        logger.error(f"Error getting calendar events: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/status', methods=['GET'])
def get_calendar_status():
    """Return the calendar service status report."""
    try:
        from app import calendar_manager
        return jsonify(calendar_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting calendar status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/calendar/connectivity', methods=['GET'])
def test_calendar_connectivity():
    """Run and return the calendar connectivity check."""
    try:
        from app import calendar_manager
        return jsonify(calendar_manager.test_connectivity())
    except Exception as e:
        logger.error(f"Error testing calendar connectivity: {e}")
        return jsonify({"error": str(e)}), 500
+126
View File
@@ -0,0 +1,126 @@
import logging
import os
from flask import Blueprint, request, jsonify
from cell_link_manager import VALID_SERVICES
logger = logging.getLogger('picell')
bp = Blueprint('cells', __name__)
@bp.route('/api/cells/invite', methods=['GET'])
def get_cell_invite():
    """Return this cell's invite payload for sharing with a remote cell."""
    try:
        from app import cell_link_manager, config_manager
        identity = config_manager.configs.get('_identity', {})
        name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        dom = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        invite = cell_link_manager.generate_invite(name, dom)
        return jsonify(invite)
    except Exception as e:
        logger.error(f"Error generating cell invite: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells/services', methods=['GET'])
def list_shareable_services():
    """Return the list of services that can be shared between cells.

    Sourced from firewall_manager.SERVICE_IPS so the UI offers exactly the
    services the firewall can gate.
    """
    try:
        from firewall_manager import SERVICE_IPS
        return jsonify({'services': list(SERVICE_IPS.keys())})
    except Exception as e:
        # Log before responding, matching every other handler in this
        # blueprint (previously returned 500 with no trace in the logs).
        logger.error(f"Error listing shareable services: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells', methods=['GET'])
def list_cell_connections():
    """List all configured cell-to-cell connections."""
    try:
        from app import cell_link_manager
        links = cell_link_manager.list_connections()
        return jsonify(links)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells', methods=['POST'])
def add_cell_connection():
    """Accept a remote cell's invite payload and establish the connection."""
    try:
        from app import cell_link_manager
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No data provided'}), 400
        required = ('cell_name', 'public_key', 'vpn_subnet', 'dns_ip', 'domain')
        missing = next((f for f in required if f not in data), None)
        if missing is not None:
            return jsonify({'error': f'Missing field: {missing}'}), 400
        inbound_services = data.get('inbound_services', [])
        link = cell_link_manager.add_connection(data, inbound_services=inbound_services)
        return jsonify({'message': f"Connected to cell '{data['cell_name']}'", 'link': link}), 201
    except (ValueError, RuntimeError) as e:
        # Both indicate a client-side problem (duplicate/invalid invite).
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error adding cell connection: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells/<cell_name>', methods=['DELETE'])
def remove_cell_connection(cell_name):
    """Tear down the connection to the named cell."""
    try:
        from app import cell_link_manager
        cell_link_manager.remove_connection(cell_name)
    except ValueError as e:
        # Unknown cell name.
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        logger.error(f"Error removing cell connection: {e}")
        return jsonify({'error': str(e)}), 500
    return jsonify({'message': f"Cell '{cell_name}' disconnected"})
@bp.route('/api/cells/<cell_name>/status', methods=['GET'])
def get_cell_connection_status(cell_name):
    """Return the link record enriched with live status for the named cell."""
    try:
        from app import cell_link_manager
        status = cell_link_manager.get_connection_status(cell_name)
        return jsonify(status)
    except ValueError as e:
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells/<cell_name>/permissions', methods=['GET'])
def get_cell_permissions(cell_name):
    """Return the inbound/outbound service permissions for a connected cell."""
    try:
        from app import cell_link_manager
        return jsonify(cell_link_manager.get_permissions(cell_name))
    except ValueError as e:
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/cells/<cell_name>/permissions', methods=['PUT'])
def update_cell_permissions(cell_name):
    """Update inbound/outbound service-sharing permissions for a cell.

    Body: {"inbound": {service: bool, ...}, "outbound": {...}}. Unknown
    service names — and non-object inbound/outbound values — are rejected
    with 400. After persisting, the CoreDNS Corefile is regenerated so
    outbound DNS changes take effect (best effort: failure is logged but
    does not fail the request).
    """
    try:
        from app import cell_link_manager, firewall_manager, peer_registry
        from app import COREFILE_PATH
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No data provided'}), 400
        # Validate shape and service names in inbound/outbound
        for direction in ('inbound', 'outbound'):
            section = data.get(direction, {})
            if not isinstance(section, dict):
                # A list/string here would raise deep inside the manager and
                # surface as a 500 — reject it up front with a clear 400.
                return jsonify({'error': f'{direction} must be an object of service: bool'}), 400
            for service in section:
                if service not in VALID_SERVICES:
                    return jsonify({'error': f'Unknown service: {service!r}'}), 400
        inbound = data.get('inbound', {})
        outbound = data.get('outbound', {})
        link = cell_link_manager.update_permissions(cell_name, inbound, outbound)
        # Regenerate Corefile so outbound DNS changes take effect
        try:
            from app import config_manager
            domain = config_manager.configs.get('_identity', {}).get('domain', 'cell')
            peers = peer_registry.list_peers()
            cell_links = cell_link_manager.list_connections()
            firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, domain,
                                                 cell_links=cell_links)
        except Exception as e:
            logger.warning(f"DNS regen after permission update failed (non-fatal): {e}")
        return jsonify({'message': f"Permissions updated for '{cell_name}'", 'link': link})
    except ValueError as e:
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        logger.error(f"Error updating cell permissions: {e}")
        return jsonify({'error': str(e)}), 500
+673
View File
@@ -0,0 +1,673 @@
import io
import os
import re
import copy
import json
import ipaddress
import zipfile
import shutil
import logging
import threading
from datetime import datetime
from flask import Blueprint, request, jsonify, send_file, current_app
logger = logging.getLogger('picell')
bp = Blueprint('config', __name__)
# ---------------------------------------------------------------------------
# Pending-restart helpers
# ---------------------------------------------------------------------------
def _collect_service_ports(configs: dict) -> dict:
"""Extract current port values from service configs for .env generation."""
from app import config_manager as _cm
ports = {}
net = configs.get('network', {})
wg = configs.get('wireguard', {})
email = configs.get('email', {})
cal = configs.get('calendar', {})
files = configs.get('files', {})
identity = configs.get('_identity', {})
if 'dns_port' in net: ports['dns_port'] = net['dns_port']
if 'port' in wg: ports['wg_port'] = wg['port']
elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port']
if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port']
if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port']
if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port']
if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port']
if 'port' in cal: ports['radicale_port'] = cal['port']
if 'port' in files: ports['webdav_port'] = files['port']
if 'manager_port' in files: ports['filegator_port'] = files['manager_port']
return ports
def _dedup_changes(existing: list, new: list) -> list:
"""Merge change lists, keeping only the latest entry per config key."""
def key_of(msg: str) -> str:
if ' changed' in msg:
return msg.split(' changed')[0].strip()
if ':' in msg:
return msg.split(':')[0].strip()
return msg
merged = {key_of(c): c for c in existing}
merged.update({key_of(c): c for c in new})
return list(merged.values())
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
                         pre_change_snapshot: dict = None):
    """Record that specific containers need to be restarted to apply configuration.

    Merges with any restart already pending:
    - changes are deduplicated per config key via _dedup_changes (latest wins)
    - containers accumulate as a set; containers=None or a '*' entry (from
      this call or a previous one) collapses the list to ['*']
    - the pre-change snapshot is captured only on the FIRST change of a
      pending batch and preserved afterwards — presumably so a revert can
      restore the state from before the whole batch (confirm with caller).
    """
    from app import config_manager
    existing = config_manager.configs.get('_pending_restart', {})
    # Only inherit prior state if a restart is actually pending.
    existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
    existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []
    if not existing.get('needs_restart'):
        snapshot = pre_change_snapshot or {}
    else:
        snapshot = existing.get('_snapshot', {})
    if containers is None or '*' in (containers or []) or existing_containers == ['*']:
        new_containers = ['*']
    else:
        new_containers = list(set(existing_containers) | set(containers))
    config_manager.configs['_pending_restart'] = {
        'needs_restart': True,
        'changed_at': datetime.utcnow().isoformat(),
        'changes': _dedup_changes(existing_changes, changes),
        'containers': new_containers,
        # network_recreate is sticky once set by any change in the batch.
        'network_recreate': network_recreate or existing.get('network_recreate', False),
        '_snapshot': snapshot,
    }
    config_manager._save_all_configs()
def _clear_pending_restart():
    """Reset the pending-restart marker to its inactive state and persist it."""
    from app import config_manager
    cleared = {
        'needs_restart': False,
        'changes': [],
        'containers': [],
        'network_recreate': False,
    }
    config_manager.configs['_pending_restart'] = cleared
    config_manager._save_all_configs()
# ---------------------------------------------------------------------------
# Config routes
# ---------------------------------------------------------------------------
@bp.route('/api/config', methods=['GET'])
def get_config():
    """Return the cell's identity, derived service IPs, and per-service configs."""
    try:
        from app import config_manager
        import ip_utils as _ip_utils_cfg
        service_configs = config_manager.get_all_configs()
        identity = service_configs.pop('_identity', {})
        cell_name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        domain = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        ip_range = identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
        wg_port = identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820')))
        # Service VIPs are derived deterministically from the cell's IP range.
        derived = _ip_utils_cfg.get_service_ips(ip_range)
        return jsonify({
            'cell_name': cell_name,
            'domain': domain,
            'ip_range': ip_range,
            'wireguard_port': wg_port,
            'service_ips': {
                'dns': derived['dns'],
                'vip_mail': derived['vip_mail'],
                'vip_calendar': derived['vip_calendar'],
                'vip_files': derived['vip_files'],
                'vip_webdav': derived['vip_webdav'],
            },
            'service_configs': service_configs,
        })
    except Exception as e:
        logger.error(f"Error getting config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/config', methods=['PUT'])
def update_config():
    """Validate and apply a configuration update.

    Accepts a JSON body mixing identity keys (cell_name, domain, ip_range,
    wireguard_port) and per-service config sections. All validation happens
    up-front (400/409 before anything is persisted). Application happens
    afterwards; changes that need container restarts are recorded via
    _set_pending_restart() rather than applied immediately.
    """
    try:
        from app import (config_manager, network_manager, wireguard_manager, email_manager,
                         calendar_manager, file_manager, routing_manager,
                         peer_registry, firewall_manager, service_bus, EventType, detect_conflicts)
        import ip_utils
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        # --- Phase 1: validation (nothing persisted yet) -------------------
        identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
        identity_updates = {k: v for k, v in data.items() if k in identity_keys}
        _CELL_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$')
        _DOMAIN_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$')
        if 'cell_name' in identity_updates:
            v = str(identity_updates['cell_name'])
            if not v:
                return jsonify({'error': 'cell_name cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
            if not _CELL_NAME_RE.match(v):
                return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400
        if 'domain' in identity_updates:
            v = str(identity_updates['domain'])
            if not v:
                return jsonify({'error': 'domain cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
            if not _DOMAIN_RE.match(v):
                return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400
        if 'ip_range' in identity_updates:
            # Only RFC-1918 private address space is accepted for the cell subnet.
            _rfc1918 = [
                ipaddress.ip_network('10.0.0.0/8'),
                ipaddress.ip_network('172.16.0.0/12'),
                ipaddress.ip_network('192.168.0.0/16'),
            ]
            try:
                _raw = str(identity_updates['ip_range'])
                if '/' not in _raw:
                    return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
                _net = ipaddress.ip_network(_raw, strict=False)
                if not any(_net.subnet_of(r) for r in _rfc1918):
                    return jsonify({'error': (
                        'ip_range must be within an RFC-1918 private range '
                        '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
                    )}), 400
            except ValueError as _e:
                return jsonify({'error': f'Invalid ip_range: {_e}'}), 400
        # Fields per service that must hold valid TCP/UDP port numbers.
        _port_fields = {
            'network': ['dns_port'],
            'wireguard': ['port'],
            'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
            'calendar': ['port'],
            'files': ['port', 'manager_port'],
        }
        for _svc, _fields in _port_fields.items():
            if _svc not in data:
                continue
            _svc_data = data[_svc]
            if not isinstance(_svc_data, dict):
                continue
            for _f in _fields:
                # Empty string / None means "leave unchanged" and is skipped.
                if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
                    try:
                        _p = int(_svc_data[_f])
                        if not (1 <= _p <= 65535):
                            raise ValueError()
                    except (ValueError, TypeError):
                        return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400
        # Cross-service port collision check over stored + incoming config.
        _conflicts = detect_conflicts(config_manager.configs, data)
        if _conflicts:
            _msgs = [
                f"port {_c['port']} is used by {', '.join(f'{_s}.{_f}' for _s, _f in _c['conflicts'])}"
                for _c in _conflicts
            ]
            return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409
        if 'wireguard' in data and isinstance(data['wireguard'], dict):
            _addr = data['wireguard'].get('address')
            if _addr:
                if '/' not in str(_addr):
                    return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
                try:
                    ipaddress.ip_interface(_addr)
                except ValueError as _e:
                    return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400
        # --- Phase 2: snapshot current state so pending changes can revert --
        old_identity = dict(config_manager.configs.get('_identity', {}))
        old_svc_configs = {
            svc: dict(config_manager.configs.get(svc, {}))
            for svc in data if svc in config_manager.service_schemas
        }
        _pre_change_snapshot = {k: copy.deepcopy(v) for k, v in config_manager.configs.items()
                                if not k.startswith('_')}
        _pre_change_snapshot['_identity'] = copy.deepcopy(config_manager.configs.get('_identity', {}))
        if identity_updates:
            stored = config_manager.configs.get('_identity', {})
            stored.update(identity_updates)
            config_manager.configs['_identity'] = stored
            config_manager._save_all_configs()
        # Map service section names to their manager objects.
        _svc_managers = {
            'network': network_manager,
            'wireguard': wireguard_manager,
            'email': email_manager,
            'calendar': calendar_manager,
            'files': file_manager,
            'routing': routing_manager,
            'vault': current_app.vault_manager,
        }
        all_restarted = []
        all_warnings = []
        # --- Phase 3: apply each per-service config section -----------------
        for service, config in data.items():
            if service in config_manager.service_schemas:
                config_manager.update_service_config(service, config)
                mgr = _svc_managers.get(service)
                if mgr:
                    mgr.update_config(config)
                    result = mgr.apply_config(config)
                    all_restarted.extend(result.get('restarted', []))
                    all_warnings.extend(result.get('warnings', []))
                service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
                    'service': service, 'config': config
                })
                if service == 'wireguard' and ('port' in config or 'address' in config):
                    # Endpoint change invalidates every peer's installed VPN config.
                    for p in peer_registry.list_peers():
                        peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
                    n = len(peer_registry.list_peers())
                    if n:
                        all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
                    if 'port' in config:
                        # Mirror the service-level port into the identity record.
                        _id = config_manager.configs.get('_identity', {})
                        _id['wireguard_port'] = config['port']
                        config_manager.configs['_identity'] = _id
                        config_manager._save_all_configs()
        # --- Phase 4: identity changes (domain / cell_name / ip_range) ------
        if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
            domain = identity_updates['domain']
            net_result = network_manager.apply_domain(domain, reload=False)
            all_warnings.extend(net_result.get('warnings', []))
            _cur_id = config_manager.configs.get('_identity', {})
            ip_utils.write_caddyfile(
                _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
                _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
                domain, '/app/config-caddy/Caddyfile'
            )
            _set_pending_restart(
                [f'domain changed to {domain}'],
                ['dns', 'caddy'],
                pre_change_snapshot=_pre_change_snapshot,
            )
        if identity_updates.get('cell_name'):
            old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            new_name = identity_updates['cell_name']
            if old_name != new_name:
                cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
                all_warnings.extend(cn_result.get('warnings', []))
                _cur_id2 = config_manager.configs.get('_identity', {})
                ip_utils.write_caddyfile(
                    _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
                    new_name,
                    identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
                    '/app/config-caddy/Caddyfile'
                )
                _set_pending_restart(
                    [f'cell_name changed to {new_name}'],
                    ['dns'],
                    pre_change_snapshot=_pre_change_snapshot,
                )
        if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
            # Subnet change touches firewall rules, env file, Caddyfile, and
            # requires a full network recreate on apply.
            new_range = identity_updates['ip_range']
            cur_identity = config_manager.configs.get('_identity', {})
            cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
            ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
            all_restarted.extend(ip_result.get('restarted', []))
            all_warnings.extend(ip_result.get('warnings', []))
            firewall_manager.update_service_ips(new_range)
            firewall_manager.ensure_caddy_virtual_ips()
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
            ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, '/app/config-caddy/Caddyfile')
            _set_pending_restart(
                [f'ip_range changed to {new_range} — network will be recreated'],
                ['*'], network_recreate=True,
                pre_change_snapshot=_pre_change_snapshot,
            )
        # --- Phase 5: port-change detection -> pending container restarts ---
        # (service_key, field) -> (env-file key, containers to restart)
        _PORT_CHANGE_MAP = {
            ('network', 'dns_port'): ('dns_port', ['dns']),
            ('wireguard','port'): ('wg_port', ['wireguard']),
            ('email', 'smtp_port'): ('mail_smtp_port', ['mail']),
            ('email', 'submission_port'): ('mail_submission_port', ['mail']),
            ('email', 'imap_port'): ('mail_imap_port', ['mail']),
            ('email', 'webmail_port'): ('rainloop_port', ['rainloop']),
            ('calendar', 'port'): ('radicale_port', ['radicale']),
            ('files', 'port'): ('webdav_port', ['webdav']),
            ('files', 'manager_port'): ('filegator_port', ['filegator']),
        }
        port_changed_containers = set()
        port_change_messages = []
        for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
            if svc_key in data and field in data[svc_key]:
                default_val = ip_utils.PORT_DEFAULTS.get(_env_key)
                old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
                new_val = data[svc_key][field]
                if old_val != new_val:
                    port_changed_containers.update(containers)
                    port_change_messages.append(f'{svc_key} {field}: {old_val}{new_val}')
        if 'wireguard_port' in identity_updates:
            old_wg = old_identity.get('wireguard_port', ip_utils.PORT_DEFAULTS.get('wg_port', 51820))
            new_wg = identity_updates['wireguard_port']
            if old_wg != new_wg:
                _wg_svc = config_manager.configs.get('wireguard', {})
                _wg_svc['port'] = new_wg
                config_manager.update_service_config('wireguard', _wg_svc)
                wireguard_manager.apply_config({'port': new_wg})
                port_changed_containers.add('wireguard')
                port_change_messages.append(f'wireguard_port: {old_wg}{new_wg}')
        if port_changed_containers:
            # Regenerate the compose env file so restarted containers pick up
            # the new port mappings.
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            _ip_range = config_manager.configs.get('_identity', {}).get(
                'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
            )
            ip_utils.write_env_file(_ip_range, env_file, _collect_service_ports(config_manager.configs))
            _set_pending_restart(port_change_messages, list(port_changed_containers),
                                 pre_change_snapshot=_pre_change_snapshot)
        logger.info(f"Updated config, restarted: {all_restarted}")
        return jsonify({
            "message": "Configuration updated and applied",
            "restarted": all_restarted,
            "warnings": all_warnings,
        })
    except Exception as e:
        logger.error(f"Error updating config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/config/pending', methods=['GET'])
def get_pending_config():
    """Report whether a restart is pending, and which changes caused it."""
    from app import config_manager
    state = config_manager.configs.get('_pending_restart', {})
    summary = {
        'needs_restart': state.get('needs_restart', False),
        'applying': state.get('applying', False),
        'changed_at': state.get('changed_at'),
        'changes': state.get('changes', []),
        'containers': state.get('containers', ['*']),
    }
    return jsonify(summary)
@bp.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
    """Discard pending (not-yet-applied) configuration changes.

    Restores the pre-change snapshot recorded by _set_pending_restart(),
    reverts domain/cell_name side effects already written to DNS config,
    regenerates the Caddyfile from the reverted identity, then clears the
    pending-restart flag (which also persists the restored configs).
    """
    from app import config_manager, network_manager
    import ip_utils as _ip_revert
    pending = config_manager.configs.get('_pending_restart', {})
    snapshot = pending.get('_snapshot', {})
    if snapshot:
        cur_identity = dict(config_manager.configs.get('_identity', {}))
        old_identity = snapshot.get('_identity', {})
        # Restore every snapshotted section wholesale.
        for k, v in snapshot.items():
            config_manager.configs[k] = v
        _id = config_manager.configs.get('_identity', {})
        _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        cur_domain = cur_identity.get('domain', '')
        old_domain = old_identity.get('domain', '')
        if cur_domain and old_domain and cur_domain != old_domain:
            # Re-point DNS at the snapshot's domain (no reload yet).
            network_manager.apply_domain(old_domain, reload=False)
        cur_cell_name = cur_identity.get('cell_name', '')
        old_cell_name = old_identity.get('cell_name', '')
        if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
            # apply_cell_name(from, to): rename the current name back to the old one.
            network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)
        _ip_revert.write_caddyfile(
            _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
            _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            _dom, '/app/config-caddy/Caddyfile'
        )
    _clear_pending_restart()
    return jsonify({'message': 'Pending changes discarded'})
@bp.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
    """Apply pending configuration by restarting the affected containers.

    Two strategies:
      * subset of containers -> in-process background thread running
        `docker compose up -d --no-deps --force-recreate <containers>`;
      * '*' (all services, possibly with a network recreate) -> spawn a
        detached helper container, because this API container is itself
        among those restarted and cannot orchestrate its own replacement.
    """
    try:
        from app import config_manager
        pending = config_manager.configs.get('_pending_restart', {})
        if not pending.get('needs_restart'):
            return jsonify({'message': 'No pending changes to apply'})
        # Fallback values; overridden below by introspecting our own container.
        project_dir = '/home/roof/pic'
        api_image = 'pic_api:latest'
        data_host_path = '/home/roof/pic/data/api'
        try:
            import docker as _docker_sdk
            _client = _docker_sdk.from_env()
            _self = _client.containers.get('cell-api')
            project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
            tags = _self.image.tags
            if tags:
                api_image = tags[0]
            # Find the host path backing /app/data so the helper can share it.
            for _m in _self.attrs.get('Mounts', []):
                if _m.get('Destination') == '/app/data':
                    data_host_path = _m.get('Source', data_host_path)
                    break
        except Exception:
            # Best-effort introspection; the defaults above are used on failure.
            pass
        containers = pending.get('containers', ['*'])
        needs_network_recreate = pending.get('network_recreate', False)
        host_env = os.path.join(project_dir, '.env')
        host_compose = os.path.join(project_dir, 'docker-compose.yml')
        if '*' in containers:
            # Mark "applying" so clients can distinguish restart-in-progress.
            config_manager.configs['_pending_restart']['applying'] = True
            config_manager._save_all_configs()
            import base64 as _b64
            # Python snippet the helper runs to atomically clear the pending
            # flag in the shared cell_config.json (base64 survives shell quoting).
            _clear_py = (
                "import json,os; p='/app/data/cell_config.json';"
                "f=open(p); d=json.load(f); f.close();"
                "d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};"
                "tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)"
            )
            _b64_cmd = _b64.b64encode(_clear_py.encode()).decode()
            clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\""
            if needs_network_recreate:
                # Full down/up so the Docker network itself is recreated.
                helper_script = (
                    f'sleep 2'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} down'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )
            else:
                helper_script = (
                    f'sleep 2'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )
            def _do_apply():
                # Detached helper container with the docker socket mounted;
                # it outlives this API container across the restart.
                import subprocess as _subprocess
                _subprocess.Popen(
                    ['docker', 'run', '--rm',
                     '-v', '/var/run/docker.sock:/var/run/docker.sock',
                     '-v', f'{project_dir}:{project_dir}',
                     '-v', f'{data_host_path}:/app/data',
                     '--entrypoint', 'sh',
                     api_image,
                     '-c', helper_script],
                    close_fds=True,
                    stdout=_subprocess.DEVNULL,
                    stderr=_subprocess.DEVNULL,
                )
                logger.info(
                    'spawned helper container for all-services restart'
                    + (' (network_recreate)' if needs_network_recreate else '')
                )
        else:
            def _do_apply():
                # Subset restart can run in-process: this container survives.
                import time as _time
                import subprocess as _subprocess
                _time.sleep(0.3)
                result = _subprocess.run(
                    ['docker', 'compose',
                     '--project-directory', project_dir,
                     '-f', '/app/docker-compose.yml',
                     '--env-file', '/app/.env.compose',
                     'up', '-d', '--no-deps', '--force-recreate'] + containers,
                    capture_output=True, text=True, timeout=120,
                )
                if result.returncode != 0:
                    logger.error(f"docker compose up failed: {result.stderr.strip()}")
                else:
                    logger.info(f'docker compose up completed for: {containers}')
                _clear_pending_restart()
        # Respond immediately; the restart proceeds in the background.
        threading.Thread(target=_do_apply, daemon=False).start()
        return jsonify({
            'message': 'Applying configuration — containers are restarting',
            'restart_in_progress': True,
        })
    except Exception as e:
        logger.error(f"Error applying config: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backup', methods=['POST'])
def create_config_backup():
    """Snapshot the current configuration and announce it on the service bus."""
    try:
        from app import config_manager, service_bus, EventType
        backup_id = config_manager.backup_config()
        event_payload = {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat()
        }
        service_bus.publish_event(EventType.BACKUP_CREATED, 'api', event_payload)
        return jsonify({"backup_id": backup_id})
    except Exception as exc:
        logger.error(f"Error creating backup: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/config/backups', methods=['GET'])
def list_config_backups():
    """List all available configuration backups."""
    try:
        from app import config_manager
        backups = config_manager.list_backups()
        return jsonify(backups)
    except Exception as exc:
        logger.error(f"Error listing backups: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
    """Restore configuration (optionally only selected services) from a backup."""
    try:
        from app import config_manager, service_bus, EventType
        body = request.get_json(silent=True) or {}
        if not config_manager.restore_config(backup_id, services=body.get('services')):
            return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
        service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat()
        })
        return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
    except Exception as exc:
        logger.error(f"Error restoring backup: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/config/export', methods=['GET'])
def export_config():
    """Export the full configuration in the requested serialization format.

    Query params:
        format: passed through to the config manager (default 'json').
    """
    try:
        from app import config_manager
        # Named 'fmt' to avoid shadowing the builtin `format`.
        fmt = request.args.get('format', 'json')
        config_data = config_manager.export_config(fmt)
        return jsonify({"config": config_data, "format": fmt})
    except Exception as e:
        logger.error(f"Error exporting config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/config/import', methods=['POST'])
def import_config():
    """Import a previously exported configuration payload."""
    try:
        from app import config_manager
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        ok = config_manager.import_config(body.get('config'), body.get('format', 'json'))
        if not ok:
            return jsonify({"error": "Failed to import configuration"}), 500
        return jsonify({"message": "Configuration imported successfully"})
    except Exception as exc:
        logger.error(f"Error importing config: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
    """Stream a backup directory as a zip download.

    `backup_id` comes from the URL, so it is validated against the same
    [A-Za-z0-9_] alphabet enforced when backups are uploaded; this blocks
    path traversal (e.g. '..') out of the backup directory.
    """
    try:
        from app import config_manager
        # Reject anything outside the id alphabet used at creation/upload time.
        if not backup_id or not all(c.isalnum() or c == '_' for c in backup_id):
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        backup_path = config_manager.backup_dir / backup_id
        if not backup_path.exists():
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
            for f in backup_path.rglob('*'):
                if f.is_file():
                    # Store paths relative to the backup root.
                    zf.write(f, f.relative_to(backup_path))
        buf.seek(0)
        return send_file(buf, mimetype='application/zip',
                         as_attachment=True,
                         download_name=f'{backup_id}.zip')
    except Exception as e:
        logger.error(f"Error downloading backup: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
    """Import a backup archive uploaded as a zip file.

    The archive is extracted into a fresh directory under the backup dir
    and must contain a manifest.json at its root. Member paths are checked
    before extraction to prevent zip-slip ('../' or absolute members that
    would escape the target directory).
    """
    try:
        from app import config_manager
        if 'file' not in request.files:
            return jsonify({'error': 'No file provided'}), 400
        f = request.files['file']
        filename = f.filename or ''
        if filename.endswith('.zip'):
            backup_id = filename[:-4]
        else:
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        # Keep only [A-Za-z0-9_] so the id is safe as a directory name.
        backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
        backup_path = config_manager.backup_dir / backup_id
        backup_path.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
                # Zip-slip guard: every member must resolve inside backup_path.
                root = os.path.realpath(str(backup_path))
                for member in zf.namelist():
                    target = os.path.realpath(os.path.join(root, member))
                    if target != root and not target.startswith(root + os.sep):
                        shutil.rmtree(backup_path, ignore_errors=True)
                        return jsonify({'error': 'Invalid zip file'}), 400
                zf.extractall(backup_path)
        except zipfile.BadZipFile:
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid zip file'}), 400
        if not (backup_path / 'manifest.json').exists():
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
        return jsonify({'backup_id': backup_id})
    except Exception as e:
        logger.error(f"Error uploading backup: {e}")
        return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
    """Delete a single configuration backup by id."""
    try:
        from app import config_manager
        if not config_manager.delete_backup(backup_id):
            return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
        return jsonify({"message": f"Backup {backup_id} deleted"})
    except Exception as exc:
        logger.error(f"Error deleting backup: {exc}")
        return jsonify({"error": str(exc)}), 500
+195
View File
@@ -0,0 +1,195 @@
import logging
import os
from flask import Blueprint, request, jsonify, current_app
# Shared application logger for this blueprint's request handlers.
logger = logging.getLogger('picell')
# Blueprint exposing Docker container/image/volume management endpoints.
bp = Blueprint('containers', __name__)
@bp.route('/api/containers', methods=['GET'])
def list_containers():
    """List all containers. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        containers = container_manager.list_containers()
        return jsonify(containers)
    except Exception as exc:
        logger.error(f"Error listing containers: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers/<name>/start', methods=['POST'])
def start_container(name):
    """Start the named container. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        started = container_manager.start_container(name)
        return jsonify({'started': started})
    except Exception as exc:
        logger.error(f"Error starting container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers/<name>/stop', methods=['POST'])
def stop_container(name):
    """Stop the named container. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        stopped = container_manager.stop_container(name)
        return jsonify({'stopped': stopped})
    except Exception as exc:
        logger.error(f"Error stopping container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers/<name>/restart', methods=['POST'])
def restart_container(name):
    """Restart the named container. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        restarted = container_manager.restart_container(name)
        return jsonify({'restarted': restarted})
    except Exception as exc:
        logger.error(f"Error restarting container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers/<name>/logs', methods=['GET'])
def get_container_logs(name):
    """Return the tail of a container's logs (``?tail=N``, default 100)."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        tail = request.args.get('tail', default=100, type=int)
        log_text = container_manager.get_container_logs(name, tail=tail)
        return jsonify({'logs': log_text})
    except Exception as exc:
        logger.error(f"Error getting logs for container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers/<name>/stats', methods=['GET'])
def get_container_stats(name):
    """Return resource statistics for the named container."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        stats = container_manager.get_container_stats(name)
        return jsonify(stats)
    except Exception as exc:
        logger.error(f"Error getting stats for container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/containers', methods=['POST'])
def create_container():
    """Create a user-defined container. Restricted to local requests.

    JSON body: image (required), name, env, secrets (vault secret names
    whose values are injected into env), volumes (host->container map,
    restricted to an allow-list of host prefixes), command, ports.
    """
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        data = request.get_json(silent=True)
        if not data or 'image' not in data:
            return jsonify({'error': 'Missing image parameter'}), 400
        name = data.get('name', '')
        env = data.get('env', {})
        secrets = data.get('secrets', [])
        if secrets:
            # Resolve each requested vault secret and inject it as an env var;
            # unknown secret names are silently skipped.
            for secret_name in secrets:
                secret_value = current_app.vault_manager.get_secret(secret_name)
                if secret_value is not None:
                    env[secret_name] = secret_value
        volumes = data.get('volumes', {})
        if volumes:
            # Host-mount allow-list; realpath defeats symlink/'..' escapes
            # before the prefix comparison.
            allowed_prefixes = ('/home/roof/pic/data/', '/home/roof/pic/config/', '/tmp/')
            for host_path in volumes.keys():
                resolved = os.path.realpath(str(host_path))
                if not any(resolved.startswith(p) for p in allowed_prefixes):
                    return jsonify({'error': f'Volume mount not allowed: {host_path}'}), 403
        result = container_manager.create_container(
            image=data['image'],
            name=name,
            env=env,
            volumes=volumes,
            command=data.get('command', ''),
            ports=data.get('ports', {})
        )
        # The manager reports failures as an 'error' key rather than raising.
        if 'error' in result:
            return jsonify(result), 500
        return jsonify(result)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/containers/<name>', methods=['DELETE'])
def remove_container(name):
    """Remove the named container. Restricted to local requests.

    Query param `force` accepts true/1/yes/on (case-insensitive). The old
    `type=bool` parsing treated ANY non-empty string — including "false"
    and "0" — as True.
    """
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        # Explicit truthy-string parse: bool("false") is True, so type=bool is wrong.
        force = request.args.get('force', 'false').strip().lower() in ('1', 'true', 'yes', 'on')
        return jsonify({'removed': container_manager.remove_container(name, force=force)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/images', methods=['GET'])
def list_images():
    """List locally available Docker images. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        images = container_manager.list_images()
        return jsonify(images)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/images/pull', methods=['POST'])
def pull_image():
    """Pull a Docker image by name. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        body = request.get_json(silent=True)
        if not body or 'image' not in body:
            return jsonify({'error': 'Missing image parameter'}), 400
        outcome = container_manager.pull_image(body['image'])
        if 'error' in outcome:
            return jsonify(outcome), 500
        return jsonify(outcome)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/images/<image>', methods=['DELETE'])
def remove_image(image):
    """Remove a Docker image. Restricted to local requests.

    Query param `force` accepts true/1/yes/on (case-insensitive); the old
    `type=bool` parsing treated any non-empty string as True.
    """
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        # Explicit truthy-string parse: bool("false") is True, so type=bool is wrong.
        force = request.args.get('force', 'false').strip().lower() in ('1', 'true', 'yes', 'on')
        return jsonify({'removed': container_manager.remove_image(image, force=force)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/volumes', methods=['GET'])
def list_volumes():
    """List Docker volumes. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        volumes = container_manager.list_volumes()
        return jsonify(volumes)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/volumes', methods=['POST'])
def create_volume():
    """Create a named Docker volume. Restricted to local requests."""
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        body = request.get_json(silent=True)
        if not body or 'name' not in body:
            return jsonify({'error': 'Missing name parameter'}), 400
        outcome = container_manager.create_volume(body['name'])
        if 'error' in outcome:
            return jsonify(outcome), 500
        return jsonify(outcome)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/volumes/<name>', methods=['DELETE'])
def remove_volume(name):
    """Remove a Docker volume. Restricted to local requests.

    Query param `force` accepts true/1/yes/on (case-insensitive); the old
    `type=bool` parsing treated any non-empty string as True.
    """
    try:
        from app import container_manager, is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        # Explicit truthy-string parse: bool("false") is True, so type=bool is wrong.
        force = request.args.get('force', 'false').strip().lower() in ('1', 'true', 'yes', 'on')
        return jsonify({'removed': container_manager.remove_volume(name, force=force)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
+92
View File
@@ -0,0 +1,92 @@
import logging
from flask import Blueprint, request, jsonify
# Shared application logger for this blueprint's request handlers.
logger = logging.getLogger('picell')
# Blueprint exposing email user/status/send endpoints.
bp = Blueprint('email', __name__)
@bp.route('/api/email/users', methods=['GET'])
def get_email_users():
    """List all email users known to the email manager."""
    try:
        from app import email_manager
        return jsonify(email_manager.get_users())
    except Exception as exc:
        logger.error(f"Error getting email users: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/users', methods=['POST'])
def create_email_user():
    """Create a mailbox; domain defaults to the cell's configured domain."""
    try:
        from app import email_manager, _configured_domain
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        domain = payload.get('domain') or _configured_domain()
        password = payload.get('password')
        if not username or not password:
            return jsonify({"error": "Missing required fields: username, password"}), 400
        created = email_manager.create_email_user(username, domain, password)
        return jsonify({"created": created})
    except Exception as exc:
        logger.error(f"Error creating email user: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/users/<username>', methods=['DELETE'])
def delete_email_user(username):
    """Delete a mailbox; domain defaults to the cell's configured domain."""
    try:
        from app import email_manager, _configured_domain
        domain = request.args.get('domain') or _configured_domain()
        deleted = email_manager.delete_email_user(username, domain)
        return jsonify({"deleted": deleted})
    except Exception as exc:
        logger.error(f"Error deleting email user: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/status', methods=['GET'])
def get_email_status():
    """Return the email service's status report."""
    try:
        from app import email_manager
        return jsonify(email_manager.get_status())
    except Exception as exc:
        logger.error(f"Error getting email status: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/connectivity', methods=['GET'])
def test_email_connectivity():
    """Run the email manager's connectivity self-test and return the result."""
    try:
        from app import email_manager
        return jsonify(email_manager.test_connectivity())
    except Exception as exc:
        logger.error(f"Error testing email connectivity: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/send', methods=['POST'])
def send_email():
    """Send an email; the JSON body is forwarded to the email manager."""
    try:
        from app import email_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(email_manager.send_email(payload))
    except Exception as exc:
        logger.error(f"Error sending email: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/email/mailbox/<username>', methods=['GET'])
def get_mailbox_info(username):
    """Return mailbox details (as reported by the email manager) for a user."""
    try:
        from app import email_manager
        return jsonify(email_manager.get_mailbox_info(username))
    except Exception as exc:
        logger.error(f"Error getting mailbox info: {exc}")
        return jsonify({"error": str(exc)}), 500
+159
View File
@@ -0,0 +1,159 @@
import logging
from flask import Blueprint, request, jsonify
# Shared application logger for this blueprint's request handlers.
logger = logging.getLogger('picell')
# Blueprint exposing file-storage user/folder/file endpoints.
bp = Blueprint('files', __name__)
@bp.route('/api/files/users', methods=['GET'])
def get_file_users():
    """List all file-storage users."""
    try:
        from app import file_manager
        return jsonify(file_manager.get_users())
    except Exception as exc:
        logger.error(f"Error getting file users: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/users', methods=['POST'])
def create_file_user():
    """Create a file-storage user from a JSON username/password payload."""
    try:
        from app import file_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        password = payload.get('password')
        if not username or not password:
            return jsonify({"error": "Missing required fields: username, password"}), 400
        created = file_manager.create_user(username, password)
        return jsonify({"created": created})
    except Exception as exc:
        logger.error(f"Error creating file user: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/users/<username>', methods=['DELETE'])
def delete_file_user(username):
    """Delete a file-storage user and return the manager's result payload."""
    try:
        from app import file_manager
        return jsonify(file_manager.delete_user(username))
    except Exception as exc:
        logger.error(f"Error deleting file user: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/folders', methods=['POST'])
def create_folder():
    """Create a folder under a user's storage; accepts 'folder_path' or 'path'."""
    try:
        from app import file_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        username = payload.get('username')
        folder_path = payload.get('folder_path') or payload.get('path')
        if not username or not folder_path:
            return jsonify({"error": "Missing required fields: username, folder_path"}), 400
        created = file_manager.create_folder(username, folder_path)
        return jsonify({"created": created})
    except ValueError as exc:
        # Manager-level validation failures map to 400.
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error(f"Error creating folder: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/folders/<username>/<path:folder_path>', methods=['DELETE'])
def delete_folder(username, folder_path):
    """Delete a folder from a user's storage."""
    try:
        from app import file_manager
        return jsonify(file_manager.delete_folder(username, folder_path))
    except ValueError as exc:
        # Manager-level validation failures map to 400.
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error(f"Error deleting folder: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/upload/<username>', methods=['POST'])
def upload_file(username):
    """Accept a multipart upload; destination comes from 'path' or the filename."""
    try:
        from app import file_manager
        if 'file' not in request.files:
            return jsonify({"error": "No file provided"}), 400
        upload = request.files['file']
        dest_path = request.form.get('path', '') or upload.filename or ''
        stored = file_manager.upload_file(username, dest_path, upload.read())
        return jsonify({"uploaded": stored})
    except ValueError as exc:
        # Manager-level validation failures map to 400.
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error(f"Error uploading file: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/download/<username>/<path:file_path>', methods=['GET'])
def download_file(username, file_path):
    """Fetch a stored file via the file manager and return its payload."""
    try:
        from app import file_manager
        return jsonify(file_manager.download_file(username, file_path))
    except ValueError as exc:
        # Manager-level validation failures map to 400.
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error(f"Error downloading file: {exc}")
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/delete/<username>/<path:file_path>', methods=['DELETE'])
def delete_file(username, file_path):
    """Delete one of the user's files."""
    try:
        from app import file_manager
        outcome = file_manager.delete_file(username, file_path)
        return jsonify(outcome)
    except ValueError as exc:
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error("Error deleting file: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/list/<username>', methods=['GET'])
def list_files(username):
    """List a user's files, optionally scoped by the ?folder= query arg."""
    try:
        from app import file_manager
        folder = request.args.get('folder', '')
        listing = file_manager.list_files(username, folder)
        return jsonify(listing)
    except ValueError as exc:
        return jsonify({"error": str(exc)}), 400
    except Exception as exc:
        logger.error("Error listing files: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/status', methods=['GET'])
def get_file_status():
    """Report the file service's current status."""
    try:
        from app import file_manager
        return jsonify(file_manager.get_status())
    except Exception as exc:
        logger.error("Error getting file status: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/files/connectivity', methods=['GET'])
def test_file_connectivity():
    """Run the file service's connectivity check and return its report."""
    try:
        from app import file_manager
        return jsonify(file_manager.test_connectivity())
    except Exception as exc:
        logger.error("Error testing file connectivity: %s", exc)
        return jsonify({"error": str(exc)}), 500
+109
View File
@@ -0,0 +1,109 @@
import logging
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
bp = Blueprint('network', __name__)
@bp.route('/api/dns/records', methods=['GET'])
def get_dns_records():
    """List the configured DNS records."""
    try:
        from app import network_manager
        records = network_manager.get_dns_records()
        return jsonify(records)
    except Exception as exc:
        logger.error("Error getting DNS records: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/dns/records', methods=['POST'])
def add_dns_record():
    """Add a DNS record; the JSON body is forwarded as keyword arguments."""
    try:
        from app import network_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        # Keys pass straight through; unexpected keys raise TypeError (→ 500).
        return jsonify(network_manager.add_dns_record(**payload))
    except Exception as exc:
        logger.error("Error adding DNS record: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/dns/records', methods=['DELETE'])
def remove_dns_record():
    """Remove a DNS record; the JSON body is forwarded as keyword arguments.

    Returns 400 when no JSON body is supplied, mirroring the POST handler.
    """
    try:
        from app import network_manager
        data = request.get_json(silent=True)
        # Bug fix: with a missing/invalid body, data is None and **None
        # raised TypeError, surfacing as an opaque 500. Validate like the
        # POST counterpart instead.
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(network_manager.remove_dns_record(**data))
    except Exception as e:
        logger.error(f"Error removing DNS record: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/dhcp/leases', methods=['GET'])
def get_dhcp_leases():
    """List active DHCP leases."""
    try:
        from app import network_manager
        leases = network_manager.get_dhcp_leases()
        return jsonify(leases)
    except Exception as exc:
        logger.error("Error getting DHCP leases: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/dhcp/reservations', methods=['POST'])
def add_dhcp_reservation():
    """Create a static DHCP reservation; requires "mac" and "ip" in the body."""
    try:
        from app import network_manager
        payload = request.get_json(silent=True)
        if not payload:
            return jsonify({"error": "No data provided"}), 400
        # Report the first missing field, in the documented order.
        missing = [name for name in ('mac', 'ip') if name not in payload]
        if missing:
            return jsonify({"error": f"Missing required field: {missing[0]}"}), 400
        ok = network_manager.add_dhcp_reservation(payload['mac'], payload['ip'], payload.get('hostname', ''))
        return jsonify({"success": ok})
    except Exception as exc:
        logger.error("Error adding DHCP reservation: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/dhcp/reservations', methods=['DELETE'])
def remove_dhcp_reservation():
    """Delete the DHCP reservation for the given "mac" address."""
    try:
        from app import network_manager
        payload = request.get_json(silent=True)
        if not payload or 'mac' not in payload:
            return jsonify({"error": "Missing required field: mac"}), 400
        ok = network_manager.remove_dhcp_reservation(payload['mac'])
        return jsonify({"success": ok})
    except Exception as exc:
        logger.error("Error removing DHCP reservation: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/ntp/status', methods=['GET'])
def get_ntp_status():
    """Report NTP service status."""
    try:
        from app import network_manager
        status = network_manager.get_ntp_status()
        return jsonify(status)
    except Exception as exc:
        logger.error("Error getting NTP status: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/network/info', methods=['GET'])
def get_network_info():
    """Return general network information."""
    try:
        from app import network_manager
        info = network_manager.get_network_info()
        return jsonify(info)
    except Exception as exc:
        logger.error("Error getting network info: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/dns/status', methods=['GET'])
def get_dns_status():
    """Report DNS service status."""
    try:
        from app import network_manager
        status = network_manager.get_dns_status()
        return jsonify(status)
    except Exception as exc:
        logger.error("Error getting DNS status: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/network/test', methods=['POST'])
def test_network():
    """Run the network connectivity test and return its report."""
    try:
        from app import network_manager
        report = network_manager.test_connectivity()
        return jsonify(report)
    except Exception as exc:
        logger.error("Error testing network: %s", exc)
        return jsonify({"error": str(exc)}), 500
+115
View File
@@ -0,0 +1,115 @@
import logging
from flask import Blueprint, jsonify, session
logger = logging.getLogger('picell')
bp = Blueprint('peer_dashboard', __name__)
@bp.route('/api/peer/dashboard', methods=['GET'])
def peer_dashboard():
    """Dashboard data for the peer bound to the current session.

    Combines the registry entry with live WireGuard transfer stats and
    the URLs of the services the peer may access.
    """
    try:
        from app import peer_registry, wireguard_manager, _configured_domain
        peer_name = session.get('peer_name')
        peer = peer_registry.get_peer(peer_name) if peer_name else None
        if not peer:
            return jsonify({'error': 'Peer not found'}), 404
        # Live WireGuard stats are best-effort; keep "unknown" defaults on failure.
        wg_stats = {'online': None, 'transfer_rx': 0, 'transfer_tx': 0, 'last_handshake': None}
        pubkey = peer.get('public_key')
        if pubkey:
            try:
                wg_stats = wireguard_manager.get_peer_status(pubkey)
            except Exception:
                pass
        peer_ip = peer.get('ip', '')
        default_allowed = f"{peer_ip.split('/')[0]}/32" if peer_ip else ''
        domain = _configured_domain()
        # Each known service lives at http://<service>.<domain>.
        service_urls = {}
        for svc in peer.get('service_access', []):
            if svc in ('calendar', 'files', 'mail', 'webdav'):
                service_urls[svc] = f'http://{svc}.{domain}'
        return jsonify({
            'name': peer_name,
            'ip': peer_ip,
            'service_access': peer.get('service_access', []),
            'service_urls': service_urls,
            'online': wg_stats.get('online'),
            'transfer_rx': wg_stats.get('transfer_rx', 0),
            'transfer_tx': wg_stats.get('transfer_tx', 0),
            'last_handshake': wg_stats.get('last_handshake'),
            'allowed_ips': peer.get('allowed_ips', default_allowed),
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/peer/services', methods=['GET'])
def peer_services():
    """Return connection settings (WireGuard, email, CalDAV, files) for the session peer.

    Every server-side lookup is best-effort: failures fall back to empty
    strings / defaults rather than erroring the whole response.
    """
    try:
        from app import peer_registry, wireguard_manager, config_manager, _configured_domain, _resolve_peer_dns
        peer_name = session.get('peer_name')
        peer = peer_registry.get_peer(peer_name) if peer_name else None
        if not peer:
            return jsonify({'error': 'Peer not found'}), 404
        domain = _configured_domain()
        peer_ip = peer.get('ip', '')
        # Server-side WireGuard parameters with safe fallbacks.
        server_public_key = ''
        wg_port = 51820
        server_endpoint = ''
        try:
            server_public_key = wireguard_manager.get_keys().get('public_key', '')
            wg_port = config_manager.configs.get('_identity', {}).get('wireguard_port', 51820)
            srv = wireguard_manager.get_server_config()
            # Placeholder endpoint keeps the generated config syntactically usable.
            server_endpoint = srv.get('endpoint') or '<SERVER_IP>'
        except Exception:
            pass
        # A ready-to-import client config is only possible when the peer's
        # private key was stored server-side at creation time.
        wg_config = ''
        peer_private_key = peer.get('private_key', '')
        if peer_private_key:
            try:
                internet_access = peer.get('internet_access', True)
                # Full tunnel routes all traffic; split tunnel only VPN networks.
                allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips()
                wg_config = wireguard_manager.get_peer_config(
                    peer_name=peer_name,
                    peer_ip=peer_ip,
                    peer_private_key=peer_private_key,
                    server_endpoint=server_endpoint,
                    allowed_ips=allowed_ips,
                )
            except Exception:
                pass
        return jsonify({
            'username': peer_name,
            'wireguard': {
                'ip': peer_ip,
                'server_public_key': server_public_key,
                'endpoint_port': wg_port,
                'dns': _resolve_peer_dns(),
                'config': wg_config,
            },
            'email': {
                'address': f'{peer_name}@{domain}',
                'smtp': {'host': f'mail.{domain}', 'port': 587},
                'imap': {'host': f'mail.{domain}', 'port': 993},
            },
            'caldav': {
                'url': f'http://calendar.{domain}',
                'username': peer_name,
            },
            'files': {
                'url': f'http://files.{domain}',
                'username': peer_name,
            },
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
+299
View File
@@ -0,0 +1,299 @@
import logging
import ipaddress
from flask import Blueprint, request, jsonify, session
logger = logging.getLogger('picell')
bp = Blueprint('peers', __name__)
def _next_peer_ip() -> str:
    """Return the first unused host IP in the configured VPN subnet.

    Skips the server's own address and every IP already assigned to a
    registered peer; raises ValueError when the subnet is exhausted.
    """
    from app import wireguard_manager, peer_registry
    server_addr = wireguard_manager._get_configured_address()
    network = ipaddress.ip_network(server_addr, strict=False)
    # Registry entries may store CIDR notation; compare bare addresses only.
    taken = {peer.get('ip', '').split('/')[0] for peer in peer_registry.list_peers()}
    taken.add(str(ipaddress.ip_interface(server_addr).ip))
    for candidate in network.hosts():
        addr = str(candidate)
        if addr not in taken:
            return addr
    raise ValueError(f'No free IPs left in {network}')
@bp.route('/api/peers', methods=['GET'])
def get_peers():
    """List every registered peer."""
    try:
        from app import peer_registry
        peers = peer_registry.list_peers()
        return jsonify(peers)
    except Exception as exc:
        logger.error("Error getting peers: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/peers', methods=['POST'])
def add_peer():
    """Add a peer and auto-provision auth/email/calendar/files accounts.

    Flow: validate input -> create auth account (fatal on failure) ->
    best-effort provisioning of service accounts -> register peer ->
    firewall + WireGuard + DNS updates. Any failure after registry
    insertion rolls back firewall rules and the registry entry.
    """
    try:
        from app import (peer_registry, wireguard_manager, firewall_manager,
                         email_manager, calendar_manager, file_manager, auth_manager,
                         cell_link_manager, _configured_domain, COREFILE_PATH)
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        for field in ('name', 'public_key'):
            if field not in data:
                return jsonify({"error": f"Missing required field: {field}"}), 400
        password = data.get('password') or ''
        if not password:
            return jsonify({"error": "Missing required field: password"}), 400
        if len(password) < 10:
            return jsonify({"error": "password must be at least 10 characters"}), 400
        try:
            # Caller may pin an IP; otherwise auto-assign. Exhausted subnet -> 409.
            assigned_ip = data.get('ip') or _next_peer_ip()
        except ValueError as e:
            return jsonify({'error': str(e)}), 409
        _valid_services = {'calendar', 'files', 'mail', 'webdav'}
        # Default: grant access to all services.
        service_access = data.get('service_access', list(_valid_services))
        if not isinstance(service_access, list) or not all(s in _valid_services for s in service_access):
            return jsonify({"error": f"service_access must be a list of: {sorted(_valid_services)}"}), 400
        peer_name = data['name']
        # The auth account is the only hard prerequisite; duplicates abort here.
        if not auth_manager.create_user(peer_name, password, 'peer'):
            return jsonify({"error": "Could not create auth account (duplicate name?)"}), 400
        provisioned = ['auth']
        domain = _configured_domain()
        # Service-account provisioning is best-effort: failures are logged,
        # never fatal.
        for step_name, step_fn in [
            ('email', lambda: email_manager.create_email_user(peer_name, domain, password)),
            ('calendar', lambda: calendar_manager.create_calendar_user(peer_name, password)),
            ('files', lambda: file_manager.create_user(peer_name, password)),
        ]:
            try:
                if step_fn():
                    provisioned.append(step_name)
                else:
                    logger.warning(f"Peer {peer_name}: {step_name} account creation returned False")
            except Exception as e:
                logger.warning(f"Peer {peer_name}: {step_name} account creation failed (non-fatal): {e}")
        peer_info = {
            'peer': peer_name,
            'ip': assigned_ip,
            'public_key': data['public_key'],
            'private_key': data.get('private_key'),
            'server_public_key': data.get('server_public_key'),
            'server_endpoint': data.get('server_endpoint'),
            'allowed_ips': data.get('allowed_ips'),
            'persistent_keepalive': data.get('persistent_keepalive'),
            'description': data.get('description'),
            'internet_access': data.get('internet_access', True),
            'service_access': service_access,
            'peer_access': data.get('peer_access', True),
            'config_needs_reinstall': False,
        }
        # Rollback bookkeeping for the steps below.
        peer_added_to_registry = False
        firewall_applied = False
        try:
            success = peer_registry.add_peer(peer_info)
            if not success:
                # Registry rejected the peer (duplicate): undo the accounts
                # created above, in reverse provisioning order.
                for svc in ('files', 'calendar', 'email', 'auth'):
                    try:
                        if svc == 'files':
                            file_manager.delete_user(peer_name)
                        elif svc == 'calendar':
                            calendar_manager.delete_calendar_user(peer_name)
                        elif svc == 'email':
                            email_manager.delete_email_user(peer_name, _configured_domain())
                        elif svc == 'auth':
                            auth_manager.delete_user(peer_name)
                    except Exception:
                        pass
                return jsonify({"error": f"Peer {peer_name} already exists"}), 400
            peer_added_to_registry = True
            firewall_manager.apply_peer_rules(peer_info['ip'], peer_info)
            firewall_applied = True
            # Single-host AllowedIPs unless the caller supplied a CIDR.
            wg_allowed = f"{assigned_ip}/32" if '/' not in assigned_ip else assigned_ip
            try:
                wireguard_manager.add_peer(peer_name, data['public_key'], endpoint_ip='', allowed_ips=wg_allowed)
            except Exception as wg_err:
                logger.warning(f"Peer {peer_name}: WireGuard server config update failed (non-fatal): {wg_err}")
            # Rebuild DNS ACLs for all peers + linked cells.
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(),
                                                 cell_links=cell_link_manager.list_connections())
            return jsonify({"message": f"Peer {peer_name} added successfully", "ip": assigned_ip}), 201
        except Exception as e:
            # Roll back in reverse order of application.
            if firewall_applied:
                try:
                    firewall_manager.clear_peer_rules(peer_info['ip'])
                except Exception:
                    pass
            if peer_added_to_registry:
                try:
                    peer_registry.remove_peer(peer_name)
                except Exception:
                    pass
            logger.error(f"Error adding peer {peer_name}: {e}")
            return jsonify({'error': str(e)}), 500
    except Exception as e:
        logger.error(f"Error adding peer: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/peers/<peer_name>', methods=['PUT'])
def update_peer(peer_name):
    """Update a peer's settings.

    Marks the client config for reinstall when a field that affects the
    generated WireGuard config actually changes.
    """
    try:
        from app import peer_registry, firewall_manager, cell_link_manager, _configured_domain, COREFILE_PATH
        payload = request.get_json(silent=True) or {}
        existing = peer_registry.get_peer(peer_name)
        if not existing:
            return jsonify({"error": "Peer not found"}), 404

        def _differs(key, default=None):
            # True when the request sets `key` to a new value.
            return key in payload and payload[key] != existing.get(key, default)

        config_changed = (
            _differs('internet_access', True)
            or _differs('ip')
            or _differs('persistent_keepalive')
        )
        updates = dict(payload)
        if config_changed:
            updates['config_needs_reinstall'] = True
        if not peer_registry.update_peer(peer_name, updates):
            return jsonify({"error": "Update failed"}), 500
        refreshed = peer_registry.get_peer(peer_name)
        if refreshed:
            firewall_manager.apply_peer_rules(refreshed['ip'], refreshed)
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(),
                                                 cell_links=cell_link_manager.list_connections())
        return jsonify({"message": f"Peer {peer_name} updated", "config_changed": config_changed})
    except Exception as e:
        logger.error(f"Error updating peer {peer_name}: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/peers/<peer_name>/clear-reinstall', methods=['POST'])
def clear_peer_reinstall(peer_name):
    """Clear the "config needs reinstall" flag on a peer."""
    try:
        from app import peer_registry
        peer_registry.clear_reinstall_flag(peer_name)
        return jsonify({"message": "Reinstall flag cleared"})
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/peers/<peer_name>', methods=['DELETE'])
def remove_peer(peer_name):
    """Remove a peer and tear down its firewall/WireGuard/service accounts.

    Idempotent: an unknown peer returns 200 with an informational message.
    """
    try:
        from app import (peer_registry, wireguard_manager, firewall_manager,
                         email_manager, calendar_manager, file_manager, auth_manager,
                         cell_link_manager, _configured_domain, COREFILE_PATH)
        peer = peer_registry.get_peer(peer_name)
        if not peer:
            return jsonify({"message": f"Peer {peer_name} not found or already removed"})
        # Capture before deletion; the registry entry is gone afterwards.
        peer_ip = peer.get('ip')
        peer_pubkey = peer.get('public_key', '')
        success = peer_registry.remove_peer(peer_name)
        if success:
            # Drop this peer's firewall rules, then rebuild DNS ACLs for
            # the remaining peers and linked cells.
            if peer_ip:
                firewall_manager.clear_peer_rules(peer_ip)
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain(),
                                                 cell_links=cell_link_manager.list_connections())
            if peer_pubkey:
                try:
                    wireguard_manager.remove_peer(peer_pubkey)
                except Exception as wg_err:
                    logger.warning(f"Peer {peer_name}: WireGuard removal failed (non-fatal): {wg_err}")
            # Best-effort cleanup of the provisioned service accounts.
            for _cleanup in [
                lambda: email_manager.delete_email_user(peer_name, _configured_domain()),
                lambda: calendar_manager.delete_calendar_user(peer_name),
                lambda: file_manager.delete_user(peer_name),
                lambda: auth_manager.delete_user(peer_name),
            ]:
                try:
                    _cleanup()
                except Exception:
                    pass
        return jsonify({"message": f"Peer {peer_name} removed successfully"})
    except Exception as e:
        logger.error(f"Error removing peer: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/peers/register', methods=['POST'])
def register_peer():
    """Register a peer from a raw JSON payload."""
    try:
        from app import peer_registry
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(peer_registry.register_peer(payload))
    except Exception as exc:
        logger.error("Error registering peer: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/peers/<peer_name>/unregister', methods=['DELETE'])
def unregister_peer(peer_name):
    """Unregister a peer from the registry."""
    try:
        from app import peer_registry
        result = peer_registry.unregister_peer(peer_name)
        return jsonify(result)
    except Exception as exc:
        logger.error("Error unregistering peer: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/peers/<peer_name>/update-ip', methods=['PUT'])
def update_peer_ip_registry(peer_name):
    """Record a peer's new IP in the registry and refresh routing state."""
    try:
        from app import peer_registry, routing_manager
        payload = request.get_json(silent=True)
        new_ip = payload.get('ip') if payload else None
        if not new_ip:
            return jsonify({"error": "Missing ip"}), 400
        if not peer_registry.update_peer_ip(peer_name, new_ip):
            return jsonify({"error": f"Peer {peer_name} not found"}), 404
        # Routing refresh is best-effort; the registry update already succeeded.
        try:
            routing_manager.update_peer_ip(peer_name, new_ip)
        except Exception as e:
            logger.warning(f"RoutingManager update_peer_ip failed: {e}")
        return jsonify({"message": f"IP update received for {peer_name}"})
    except Exception as exc:
        logger.error("Error updating peer IP: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/ip-update', methods=['POST'])
def ip_update():
    """Peer-initiated IP update; body carries {"peer": name, "ip": new_ip}."""
    try:
        from app import peer_registry, routing_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        target = payload.get('peer')
        new_ip = payload.get('ip')
        if not (target and new_ip):
            return jsonify({"error": "Missing peer or ip"}), 400
        if not peer_registry.update_peer_ip(target, new_ip):
            return jsonify({"error": f"Peer {target} not found"}), 404
        # Routing refresh is best-effort; the registry update already succeeded.
        try:
            routing_manager.update_peer_ip(target, new_ip)
        except Exception as e:
            logger.warning(f"RoutingManager update_peer_ip failed: {e}")
        return jsonify({"message": f"IP update received for {target}"})
    except Exception as exc:
        logger.error("Error handling IP update: %s", exc)
        return jsonify({"error": str(exc)}), 500
+207
View File
@@ -0,0 +1,207 @@
import logging
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
bp = Blueprint('routing', __name__)
@bp.route('/api/routing/status', methods=['GET'])
def get_routing_status():
    """Report routing service status."""
    try:
        from app import routing_manager
        status = routing_manager.get_status()
        return jsonify(status)
    except Exception as exc:
        logger.error("Error getting routing status: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/setup', methods=['POST'])
def setup_routing():
    """No-op setup endpoint: routing is handled by WireGuard PostUp rules."""
    try:
        from app import routing_manager
        body = {'success': True, 'message': 'Routing managed by WireGuard PostUp rules'}
        body.update(routing_manager.get_status())
        return jsonify(body)
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/nat', methods=['GET'])
def get_nat_rules():
    """List configured NAT rules."""
    try:
        from app import routing_manager
        rules = routing_manager.get_nat_rules()
        return jsonify({"nat_rules": rules})
    except Exception as exc:
        logger.error("Error getting NAT rules: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/nat', methods=['POST'])
def add_nat_rule():
    """Create a NAT rule from the JSON body (defaults: MASQUERADE, all protocols)."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        ok = routing_manager.add_nat_rule(
            source_network=payload.get('source_network'),
            target_interface=payload.get('target_interface'),
            masquerade=payload.get('masquerade', True),
            nat_type=payload.get('nat_type', 'MASQUERADE'),
            protocol=payload.get('protocol', 'ALL'),
            external_port=payload.get('external_port'),
            internal_ip=payload.get('internal_ip'),
            internal_port=payload.get('internal_port'),
        )
        return jsonify({'success': ok})
    except Exception as exc:
        logger.error("Error adding NAT rule: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/nat/<rule_id>', methods=['DELETE'])
def remove_nat_rule(rule_id):
    """Delete a NAT rule by id."""
    try:
        from app import routing_manager
        result = routing_manager.remove_nat_rule(rule_id)
        return jsonify(result)
    except Exception as exc:
        logger.error("Error removing NAT rule: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/peers', methods=['GET'])
def get_peer_routes():
    """List configured per-peer routes."""
    try:
        from app import routing_manager
        routes = routing_manager.get_peer_routes()
        return jsonify({"peer_routes": routes})
    except Exception as exc:
        logger.error("Error getting peer routes: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/peers', methods=['POST'])
def add_peer_route():
    """Add a route for a peer; requires peer_name and peer_ip in the body."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        name = payload.get('peer_name')
        ip = payload.get('peer_ip')
        if not (name and ip):
            return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400
        added = routing_manager.add_peer_route(
            name, ip,
            payload.get('allowed_networks', []),
            payload.get('route_type', 'lan'),
        )
        return jsonify({"added": added})
    except Exception as exc:
        logger.error("Error adding peer route: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/peers/<peer_name>', methods=['DELETE'])
def remove_peer_route(peer_name):
    """Delete the route entry for a peer."""
    try:
        from app import routing_manager
        result = routing_manager.remove_peer_route(peer_name)
        return jsonify(result)
    except Exception as exc:
        logger.error("Error removing peer route: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/exit-nodes', methods=['POST'])
def add_exit_node():
    """Register a peer as an internet exit node."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        name = payload.get('peer_name')
        ip = payload.get('peer_ip')
        if not (name and ip):
            return jsonify({"error": "Missing required fields: peer_name, peer_ip"}), 400
        added = routing_manager.add_exit_node(name, ip, payload.get('allowed_domains'))
        return jsonify({"added": added})
    except Exception as exc:
        logger.error("Error adding exit node: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/bridge', methods=['POST'])
def add_bridge_route():
    """Bridge traffic between two peers."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        src = payload.get('source_peer')
        dst = payload.get('target_peer')
        if not (src and dst):
            return jsonify({"error": "Missing required fields: source_peer, target_peer"}), 400
        added = routing_manager.add_bridge_route(src, dst, payload.get('allowed_networks', []))
        return jsonify({"added": added})
    except Exception as exc:
        logger.error("Error adding bridge route: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/split', methods=['POST'])
def add_split_route():
    """Route a network through a chosen exit peer, with an optional fallback."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        network = payload.get('network')
        exit_peer = payload.get('exit_peer')
        if not (network and exit_peer):
            return jsonify({"error": "Missing required fields: network, exit_peer"}), 400
        added = routing_manager.add_split_route(network, exit_peer, payload.get('fallback_peer'))
        return jsonify({"added": added})
    except Exception as exc:
        logger.error("Error adding split route: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/firewall', methods=['GET'])
def get_firewall_rules():
    """List configured firewall rules."""
    try:
        from app import routing_manager
        rules = routing_manager.get_firewall_rules()
        return jsonify({"firewall_rules": rules})
    except Exception as exc:
        logger.error("Error getting firewall rules: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/firewall', methods=['POST'])
def add_firewall_rule():
    """Create a firewall rule from the JSON body (defaults: ACCEPT, all protocols)."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        ok = routing_manager.add_firewall_rule(
            rule_type=payload.get('rule_type'),
            source=payload.get('source'),
            destination=payload.get('destination'),
            action=payload.get('action', 'ACCEPT'),
            port=payload.get('port'),
            protocol=payload.get('protocol', 'ALL'),
            port_range=payload.get('port_range'),
        )
        return jsonify({'success': ok})
    except Exception as exc:
        logger.error("Error adding firewall rule: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/firewall/<rule_id>', methods=['DELETE'])
def remove_firewall_rule(rule_id):
    """Delete a firewall rule; responds 404 when the id is unknown."""
    try:
        from app import routing_manager
        removed = routing_manager.remove_firewall_rule(rule_id)
        status = 200 if removed else 404
        return jsonify({'success': removed}), status
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/routing/live-iptables', methods=['GET'])
def get_live_iptables():
    """Dump the live iptables state from the routing manager."""
    try:
        from app import routing_manager
        snapshot = routing_manager.get_live_iptables()
        return jsonify(snapshot)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
@bp.route('/api/routing/connectivity', methods=['POST'])
def test_routing_connectivity():
    """Test reachability of a target (default 8.8.8.8), optionally via a peer."""
    try:
        from app import routing_manager
        payload = request.get_json(silent=True) or {}
        target = payload.get('target_ip', '8.8.8.8')
        via = payload.get('via_peer')
        return jsonify(routing_manager.test_routing_connectivity(target, via))
    except Exception as exc:
        logger.error("Error testing routing connectivity: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/routing/logs', methods=['GET'])
def get_routing_logs():
    """Return the last ?lines= routing log entries (default 50)."""
    try:
        from app import routing_manager
        line_count = request.args.get('lines', 50, type=int)
        return jsonify(routing_manager.get_logs(line_count))
    except Exception as exc:
        logger.error("Error getting routing logs: %s", exc)
        return jsonify({"error": str(exc)}), 500
+291
View File
@@ -0,0 +1,291 @@
import logging
import json
import os
from datetime import datetime
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
bp = Blueprint('services', __name__)
@bp.route('/api/services/bus/status', methods=['GET'])
def get_service_bus_status():
    """Summarize the status of all services registered on the bus."""
    try:
        from app import service_bus
        summary = service_bus.get_service_status_summary()
        return jsonify(summary)
    except Exception as exc:
        logger.error("Error getting service bus status: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/services/bus/events', methods=['GET'])
def get_service_bus_events():
    """List recent service-bus events, optionally filtered.

    Query args: type (an EventType value), source, limit (default 100).
    """
    try:
        from app import service_bus
        from service_bus import EventType
        event_type = request.args.get('type')
        source = request.args.get('source')
        # Robustness fix: `int(request.args.get(...))` 500'd on a malformed
        # ?limit=. `type=int` falls back to the default instead, matching
        # the other line/limit endpoints (e.g. /api/routing/logs).
        limit = request.args.get('limit', 100, type=int)
        events = service_bus.get_event_history(
            EventType(event_type) if event_type else None,
            source,
            limit
        )
        return jsonify([{
            'event_id': e.event_id,
            'event_type': e.event_type.value,
            'source': e.source,
            'data': e.data,
            'timestamp': e.timestamp.isoformat()
        } for e in events])
    except Exception as e:
        logger.error(f"Error getting service bus events: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/services/bus/services/<service_name>/start', methods=['POST'])
def start_service(service_name):
    """Start a managed service through the service bus."""
    try:
        from app import service_bus
        if service_bus.orchestrate_service_start(service_name):
            return jsonify({"message": f"Service {service_name} started successfully"})
        return jsonify({"error": f"Failed to start service {service_name}"}), 500
    except Exception as exc:
        logger.error("Error starting service %s: %s", service_name, exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/services/bus/services/<service_name>/stop', methods=['POST'])
def stop_service(service_name):
    """Stop a managed service through the service bus."""
    try:
        from app import service_bus
        if service_bus.orchestrate_service_stop(service_name):
            return jsonify({"message": f"Service {service_name} stopped successfully"})
        return jsonify({"error": f"Failed to stop service {service_name}"}), 500
    except Exception as exc:
        logger.error("Error stopping service %s: %s", service_name, exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/services/bus/services/<service_name>/restart', methods=['POST'])
def restart_service(service_name):
    """Restart a managed service through the service bus."""
    try:
        from app import service_bus
        if service_bus.orchestrate_service_restart(service_name):
            return jsonify({"message": f"Service {service_name} restarted successfully"})
        return jsonify({"error": f"Failed to restart service {service_name}"}), 500
    except Exception as exc:
        logger.error("Error restarting service %s: %s", service_name, exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/services/<service>', methods=['GET'])
def get_service_logs(service):
    """Return recent log lines for one service (?level=, ?lines=)."""
    try:
        from app import log_manager
        level = request.args.get('level', 'INFO')
        line_count = int(request.args.get('lines', 50))
        entries = log_manager.get_service_logs(service, level, line_count)
        return jsonify({"service": service, "logs": entries})
    except Exception as exc:
        logger.error("Error getting logs for %s: %s", service, exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/search', methods=['POST'])
def search_logs():
    """Search logs by query with optional time-range/service/level filters."""
    try:
        from app import log_manager
        payload = request.get_json(silent=True) or {}
        matches = log_manager.search_logs(
            payload.get('query', ''),
            payload.get('time_range'),
            payload.get('services'),
            payload.get('level'),
        )
        return jsonify({"results": matches, "count": len(matches)})
    except Exception as exc:
        logger.error("Error searching logs: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/export', methods=['POST'])
def export_logs():
    """Export logs in the requested format (default "json"), with optional filters."""
    try:
        from app import log_manager
        data = request.get_json(silent=True) or {}
        # Renamed local from `format` — avoid shadowing the builtin.
        export_format = data.get('format', 'json')
        log_data = log_manager.export_logs(export_format, data.get('filters', {}))
        return jsonify({"logs": log_data, "format": export_format})
    except Exception as e:
        logger.error(f"Error exporting logs: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/logs/statistics', methods=['GET'])
def get_log_statistics():
    """Return log statistics, optionally scoped by the ?service= arg."""
    try:
        from app import log_manager
        stats = log_manager.get_log_statistics(request.args.get('service'))
        return jsonify(stats)
    except Exception as exc:
        logger.error("Error getting log statistics: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/rotate', methods=['POST'])
def rotate_logs():
    """Rotate logs, optionally for the single service named in the body."""
    try:
        from app import log_manager
        payload = request.get_json(silent=True) or {}
        log_manager.rotate_logs(payload.get('service'))
        return jsonify({"message": "Logs rotated successfully"})
    except Exception as exc:
        logger.error("Error rotating logs: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/files', methods=['GET'])
def get_log_file_infos():
    """List metadata for all managed log files."""
    try:
        from app import log_manager
        infos = log_manager.get_all_log_file_infos()
        return jsonify(infos)
    except Exception as exc:
        logger.error("Error listing log files: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/verbosity', methods=['GET'])
def get_log_verbosity():
    """Return the current per-service log levels."""
    try:
        from app import log_manager
        levels = log_manager.get_service_levels()
        return jsonify(levels)
    except Exception as exc:
        logger.error("Error getting log verbosity: %s", exc)
        return jsonify({"error": str(exc)}), 500
@bp.route('/api/logs/verbosity', methods=['PUT'])
def set_log_verbosity():
    """Set per-service log levels and persist them to config/log_levels.json.

    Body: {service_name: level, ...}. Levels are applied in memory first,
    then merged into the persisted file so they survive a restart.
    """
    try:
        from app import log_manager
        data = request.get_json(silent=True) or {}
        # Apply in memory first so the change takes effect immediately.
        for service, level in data.items():
            log_manager.set_service_level(service, level)
        # Persist: config/log_levels.json relative to the package root
        # (two directories above this blueprint module).
        levels_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'log_levels.json')
        os.makedirs(os.path.dirname(levels_file), exist_ok=True)
        current = {}
        if os.path.exists(levels_file):
            try:
                with open(levels_file) as f:
                    current = json.load(f)
            except Exception:
                # Corrupt/unreadable file: start fresh rather than fail the request.
                pass
        # Merge the new levels over what was already persisted.
        current.update(data)
        with open(levels_file, 'w') as f:
            json.dump(current, f, indent=2)
        return jsonify({"message": "Log levels updated", "levels": log_manager.get_service_levels()})
    except Exception as e:
        logger.error(f"Error setting log verbosity: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/services/status', methods=['GET'])
def get_all_services_status():
    """Aggregate status for every registered service into one JSON payload.

    Each service contributes status/running/timestamp plus a few
    service-specific extras; a failing service yields an 'offline' stub
    instead of breaking the whole response.
    """
    # Per-service extras: output_key -> (source_key_in_status, default).
    extra_fields = {
        'network': {'dns_status': ('dns_running', False),
                    'dhcp_status': ('dhcp_running', False),
                    'ntp_status': ('ntp_running', False)},
        'wireguard': {'peers_count': ('peers_count', 0),
                      'interface': ('interface', 'unknown')},
        'email': {'users_count': ('users_count', 0),
                  'domain': ('domain', 'unknown')},
        'calendar': {'users_count': ('users_count', 0),
                     'calendars_count': ('calendars_count', 0)},
        'files': {'users_count': ('users_count', 0),
                  'storage_used': ('total_storage_used', {})},
        'routing': {'nat_rules_count': ('nat_rules_count', 0),
                    'peer_routes_count': ('peer_routes_count', 0),
                    'firewall_rules_count': ('firewall_rules_count', 0)},
        'vault': {'certificates_count': ('certificates_count', 0),
                  'trusted_keys_count': ('trusted_keys_count', 0)},
    }
    try:
        from app import service_bus
        services_status = {}
        for service_name in service_bus.list_services():
            try:
                status = service_bus.get_service(service_name).get_status()
                if not isinstance(status, dict):
                    # Non-dict statuses are coerced to a minimal shape.
                    services_status[service_name] = {'status': str(status), 'running': bool(status)}
                    continue
                clean = {
                    'status': status.get('status', 'unknown'),
                    'running': status.get('running', False),
                    'timestamp': status.get('timestamp', datetime.utcnow().isoformat()),
                }
                for out_key, (src_key, default) in extra_fields.get(service_name, {}).items():
                    clean[out_key] = status.get(src_key, default)
                services_status[service_name] = clean
            except Exception as e:
                services_status[service_name] = {'error': str(e), 'status': 'offline', 'running': False}
        payload = {name: services_status.get(name, {})
                   for name in ('network', 'wireguard', 'email', 'calendar',
                                'files', 'routing', 'vault')}
        payload['timestamp'] = datetime.utcnow().isoformat()
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Error getting all services status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/services/connectivity', methods=['GET'])
def test_all_services_connectivity():
    """Run each service's connectivity self-test and collate the results."""
    try:
        from app import service_bus
        results = {}
        for name in service_bus.list_services():
            try:
                svc = service_bus.get_service(name)
                if hasattr(svc, 'test_connectivity'):
                    results[name] = svc.test_connectivity()
                else:
                    results[name] = {'status': 'ok', 'message': 'No connectivity test available'}
            except Exception as e:
                results[name] = {'status': 'error', 'message': str(e)}
        payload = {key: results.get(key, {})
                   for key in ('network', 'wireguard', 'email', 'calendar', 'files', 'routing')}
        payload['timestamp'] = datetime.utcnow().isoformat()
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Error testing all services connectivity: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/logs', methods=['GET'])
def get_backend_logs():
    """Return the last N lines of the backend log file as JSON.

    Query params:
        lines: number of trailing lines to return (default 100);
               0 or negative returns the whole file.
    """
    log_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'picell.log')
    try:
        # Bug fix: int() previously ran outside any handler, so a
        # non-numeric ?lines= produced an unhandled ValueError (HTML 500)
        # instead of a JSON error response.
        lines = int(request.args.get('lines', 100))
    except (TypeError, ValueError):
        return jsonify({"error": "'lines' must be an integer"}), 400
    try:
        if not os.path.exists(log_file):
            return jsonify({"error": "Log file not found."}), 404
        with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
            all_lines = f.readlines()
        tail_lines = all_lines[-lines:] if lines > 0 else all_lines
        return jsonify({"log": ''.join(tail_lines)})
    except Exception as e:
        logger.error(f"Error reading log file: {e}")
        return jsonify({"error": str(e)}), 500
+165
View File
@@ -0,0 +1,165 @@
import logging
import os
from flask import Blueprint, request, jsonify, current_app
logger = logging.getLogger('picell')
bp = Blueprint('vault', __name__)
@bp.route('/api/vault/status', methods=['GET'])
def get_vault_status():
    """Report the vault service's current status."""
    try:
        status = current_app.vault_manager.get_status()
        return jsonify(status)
    except Exception as e:
        logger.error(f"Error getting vault status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/certificates', methods=['GET'])
def get_certificates():
    """List all certificates held by the vault."""
    try:
        certs = current_app.vault_manager.list_certificates()
        return jsonify(certs)
    except Exception as e:
        logger.error(f"Error getting certificates: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/certificates', methods=['POST'])
def generate_certificate():
    """Issue a new certificate; body requires common_name (domains/key_size/days optional)."""
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        cert = current_app.vault_manager.generate_certificate(
            common_name=data['common_name'],  # required; a KeyError surfaces as 500
            domains=data.get('domains', []),
            key_size=data.get('key_size', 2048),
            days=data.get('days', 365),
        )
        return jsonify(cert)
    except Exception as e:
        logger.error(f"Error generating certificate: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/certificates/<common_name>', methods=['DELETE'])
def revoke_certificate(common_name):
    """Revoke the certificate issued for *common_name*."""
    try:
        revoked = current_app.vault_manager.revoke_certificate(common_name)
        return jsonify({"revoked": revoked})
    except Exception as e:
        logger.error(f"Error revoking certificate: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/ca/certificate', methods=['GET'])
def get_ca_certificate():
    """Return the CA certificate in PEM form (wrapped in JSON)."""
    try:
        pem = current_app.vault_manager.get_ca_certificate()
        return jsonify({"certificate": pem})
    except Exception as e:
        logger.error(f"Error getting CA certificate: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/age/public-key', methods=['GET'])
def get_age_public_key():
    """Return the vault's Age encryption public key."""
    try:
        key = current_app.vault_manager.get_age_public_key()
        return jsonify({"public_key": key})
    except Exception as e:
        logger.error(f"Error getting Age public key: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/trust/keys', methods=['GET'])
def get_trusted_keys():
    """List the public keys this cell currently trusts."""
    try:
        keys = current_app.vault_manager.get_trusted_keys()
        return jsonify(keys)
    except Exception as e:
        logger.error(f"Error getting trusted keys: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/trust/keys', methods=['POST'])
def add_trusted_key():
    """Register a trusted public key; 'trust_level' defaults to 'direct'."""
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        added = current_app.vault_manager.add_trusted_key(
            name=data['name'],
            public_key=data['public_key'],
            trust_level=data.get('trust_level', 'direct'),
        )
        return jsonify({"added": added})
    except Exception as e:
        logger.error(f"Error adding trusted key: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/trust/keys/<name>', methods=['DELETE'])
def remove_trusted_key(name):
    """Remove a trusted key by its registered name."""
    try:
        removed = current_app.vault_manager.remove_trusted_key(name)
        return jsonify({"removed": removed})
    except Exception as e:
        logger.error(f"Error removing trusted key: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/trust/verify', methods=['POST'])
def verify_trust_chain():
    """Verify a peer's signature over supplied data via the trust chain."""
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        verified = current_app.vault_manager.verify_trust_chain(
            peer_name=body['peer_name'],
            signature=body['signature'],
            data=body['data'],
        )
        return jsonify({"verified": verified})
    except Exception as e:
        logger.error(f"Error verifying trust chain: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/trust/chains', methods=['GET'])
def get_trust_chains():
    """List the known trust chains."""
    try:
        chains = current_app.vault_manager.get_trust_chains()
        return jsonify(chains)
    except Exception as e:
        logger.error(f"Error getting trust chains: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/vault/secrets', methods=['GET'])
def list_secrets():
    """List stored secret names; restricted to local requests."""
    try:
        from app import is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        names = current_app.vault_manager.list_secrets()
        return jsonify({'secrets': names})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/vault/secrets', methods=['POST'])
def store_secret():
    """Store a named secret; restricted to local requests."""
    try:
        from app import is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        body = request.get_json(silent=True)
        if not body or 'name' not in body or 'value' not in body:
            return jsonify({'error': 'Missing name or value'}), 400
        current_app.vault_manager.store_secret(body['name'], body['value'])
        return jsonify({'stored': True})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/vault/secrets/<name>', methods=['GET'])
def get_secret(name):
    """Fetch one secret by name; local requests only; 404 when absent."""
    try:
        from app import is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        value = current_app.vault_manager.get_secret(name)
        if value is None:
            return jsonify({'error': 'Not found'}), 404
        return jsonify({'name': name, 'value': value})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/vault/secrets/<name>', methods=['DELETE'])
def delete_secret(name):
    """Delete one secret by name; local requests only."""
    try:
        from app import is_local_request
        if not is_local_request():
            return jsonify({'error': 'Access denied'}), 403
        deleted = current_app.vault_manager.delete_secret(name)
        return jsonify({'deleted': deleted})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
+236
View File
@@ -0,0 +1,236 @@
import logging
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
bp = Blueprint('wireguard', __name__)
@bp.route('/api/wireguard/keys', methods=['GET'])
def get_wireguard_keys():
    """Expose the server public key; the private key itself is never returned."""
    try:
        from app import wireguard_manager
        keys = wireguard_manager.get_keys()
        payload = {
            'public_key': keys.get('public_key', ''),
            'has_private_key': bool(keys.get('private_key')),
        }
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Error getting WireGuard keys: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/keys/peer', methods=['POST'])
def generate_peer_keys():
    """Generate a keypair for a peer named via 'name' (legacy alias: 'peer_name')."""
    try:
        from app import wireguard_manager
        body = request.get_json(silent=True) or {}
        peer_name = body.get('name') or body.get('peer_name')
        if not peer_name:
            return jsonify({"error": "Missing peer name"}), 400
        return jsonify(wireguard_manager.generate_peer_keys(peer_name))
    except Exception as e:
        logger.error(f"Error generating peer keys: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/config', methods=['GET'])
def get_wireguard_config():
    """Return the parsed WireGuard server configuration."""
    try:
        from app import wireguard_manager
        config = wireguard_manager.get_config()
        return jsonify(config)
    except Exception as e:
        logger.error(f"Error getting WireGuard config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers', methods=['GET'])
def get_wireguard_peers():
    """List all configured WireGuard peers."""
    try:
        from app import wireguard_manager
        peers = wireguard_manager.get_peers()
        return jsonify(peers)
    except Exception as e:
        logger.error(f"Error getting WireGuard peers: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers', methods=['POST'])
def add_wireguard_peer():
    """Add a peer to the WireGuard config; accepts 'endpoint' or legacy 'endpoint_ip'."""
    try:
        from app import wireguard_manager
        body = request.get_json(silent=True) or {}
        ok = wireguard_manager.add_peer(
            name=body.get('name', ''),
            public_key=body.get('public_key', ''),
            endpoint_ip=body.get('endpoint', body.get('endpoint_ip', '')),
            allowed_ips=body.get('allowed_ips', ''),
            persistent_keepalive=body.get('persistent_keepalive', 25),
        )
        return jsonify({"success": ok})
    except Exception as e:
        logger.error(f"Error adding WireGuard peer: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers', methods=['DELETE'])
def remove_wireguard_peer():
    """Remove a peer, identified by 'public_key' with 'name' as fallback."""
    try:
        from app import wireguard_manager
        body = request.get_json(silent=True) or {}
        identifier = body.get('public_key') or body.get('name', '')
        return jsonify({"success": wireguard_manager.remove_peer(identifier)})
    except Exception as e:
        logger.error(f"Error removing WireGuard peer: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/status', methods=['GET'])
def get_wireguard_status():
    """Report the WireGuard service's current status."""
    try:
        from app import wireguard_manager
        status = wireguard_manager.get_status()
        return jsonify(status)
    except Exception as e:
        logger.error(f"Error getting WireGuard status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/connectivity', methods=['POST'])
def test_wireguard_connectivity():
    """Run a connectivity test with parameters from the JSON body."""
    try:
        from app import wireguard_manager
        params = request.get_json(silent=True)
        if params is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(wireguard_manager.test_connectivity(params))
    except Exception as e:
        logger.error(f"Error testing WireGuard connectivity: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers/ip', methods=['PUT'])
def update_peer_ip():
    """Change a peer's VPN IP; peer identified by 'public_key' (legacy alias: 'peer')."""
    try:
        from app import wireguard_manager
        body = request.get_json(silent=True) or {}
        peer_key = body.get('public_key', body.get('peer', ''))
        ok = wireguard_manager.update_peer_ip(peer_key, body.get('ip', ''))
        return jsonify({"success": ok})
    except Exception as e:
        logger.error(f"Error updating peer IP: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers/status', methods=['POST'])
def get_peer_status():
    """Return live status for one peer identified by its public key."""
    try:
        from app import wireguard_manager
        body = request.get_json(silent=True) or {}
        public_key = body.get('public_key', '')
        if not public_key:
            return jsonify({"error": "Missing public_key"}), 400
        return jsonify(wireguard_manager.get_peer_status(public_key))
    except Exception as e:
        logger.error(f"Error getting peer status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers/statuses', methods=['GET'])
def get_all_peer_statuses():
    """Return live status for every configured peer."""
    try:
        from app import wireguard_manager
        statuses = wireguard_manager.get_all_peer_statuses()
        return jsonify(statuses)
    except Exception as e:
        logger.error(f"Error getting peer statuses: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/network/setup', methods=['POST'])
def setup_network():
    """Run the WireGuard network configuration; 500 with a message on failure."""
    try:
        from app import wireguard_manager
        if wireguard_manager.setup_network_configuration():
            return jsonify({"message": "Network configuration setup completed successfully"})
        return jsonify({"error": "Failed to setup network configuration"}), 500
    except Exception as e:
        logger.error(f"Error setting up network configuration: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/network/status', methods=['GET'])
def get_network_status():
    """Report the WireGuard network's status."""
    try:
        from app import wireguard_manager
        status = wireguard_manager.get_network_status()
        return jsonify(status)
    except Exception as e:
        logger.error(f"Error getting network status: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/peers/config', methods=['POST'])
def get_peer_config():
    # Render a client-side WireGuard config for a peer.  Missing request
    # fields are backfilled in order: ip/private_key from the peer registry,
    # endpoint from the server config, and allowed_ips from the registered
    # peer's internet_access flag (full tunnel vs split tunnel).
    try:
        from app import wireguard_manager, peer_registry
        data = request.get_json(silent=True) or {}
        peer_name = data.get('name', data.get('peer', ''))
        peer_ip = data.get('ip', '')
        peer_private_key = data.get('private_key', '')
        # Registry lookup only when a name was supplied; may be empty/falsy.
        registered = peer_registry.get_peer(peer_name) if peer_name else {}
        if peer_name and (not peer_ip or not peer_private_key):
            if registered:
                peer_ip = peer_ip or registered.get('ip', '')
                peer_private_key = peer_private_key or registered.get('private_key', '')
        server_endpoint = data.get('server_endpoint', '')
        if not server_endpoint:
            srv = wireguard_manager.get_server_config()
            # Placeholder keeps the rendered config syntactically usable
            # when no real endpoint is known yet.
            server_endpoint = srv.get('endpoint') or '<SERVER_IP>'
        allowed_ips = data.get('allowed_ips') or None
        if not allowed_ips and registered:
            # Default to full tunnel unless the peer was registered with
            # internet_access=False, in which case use the split-tunnel set.
            internet_access = registered.get('internet_access', True)
            allowed_ips = wireguard_manager.FULL_TUNNEL_IPS if internet_access else wireguard_manager.get_split_tunnel_ips()
        result = wireguard_manager.get_peer_config(
            peer_name=peer_name,
            peer_ip=peer_ip,
            peer_private_key=peer_private_key,
            server_endpoint=server_endpoint,
            allowed_ips=allowed_ips,
        )
        return jsonify({"config": result})
    except Exception as e:
        logger.error(f"Error getting peer config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/server-config', methods=['GET'])
def get_server_config():
    """Return the WireGuard server-side configuration summary."""
    try:
        from app import wireguard_manager
        server_cfg = wireguard_manager.get_server_config()
        return jsonify(server_cfg)
    except Exception as e:
        logger.error(f"Error getting server config: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/refresh-ip', methods=['GET', 'POST'])
def refresh_external_ip():
    """Force re-detection of the external IP and report the resulting endpoint."""
    try:
        from app import wireguard_manager
        ip = wireguard_manager.get_external_ip(force_refresh=True)
        port = wireguard_manager._get_configured_port()
        endpoint = f'{ip}:{port}' if ip else None
        return jsonify({'external_ip': ip, 'port': port, 'endpoint': endpoint})
    except Exception as e:
        logger.error(f"Error refreshing external IP: {e}")
        return jsonify({"error": str(e)}), 500
@bp.route('/api/wireguard/apply-enforcement', methods=['POST'])
def apply_wireguard_enforcement():
    """Re-apply firewall and DNS enforcement rules for every registered peer."""
    try:
        from app import peer_registry, firewall_manager, cell_link_manager, _configured_domain, COREFILE_PATH
        peers = peer_registry.list_peers()
        firewall_manager.apply_all_peer_rules(peers)
        links = cell_link_manager.list_connections()
        firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain(),
                                             cell_links=links)
        return jsonify({'ok': True, 'peers': len(peers)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@bp.route('/api/wireguard/check-port', methods=['GET', 'POST'])
def check_wireguard_port():
    """Probe whether the configured WireGuard UDP port is reachable."""
    try:
        from app import wireguard_manager
        is_open = wireguard_manager.check_port_open()
        configured_port = wireguard_manager._get_configured_port()
        return jsonify({'port_open': is_open, 'port': configured_port})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
+7 -23
View File
@@ -1074,33 +1074,17 @@ class RoutingManager(BaseServiceManager):
return False
def stop(self) -> bool:
"""Stop routing service"""
"""Stop routing service (state only — iptables rules are NOT flushed).
Flushing iptables here would destroy WireGuard MASQUERADE and all peer
FORWARD rules applied by firewall_manager. Individual rule removal is
handled by remove_nat_rule() / remove_firewall_rule().
"""
try:
# Set internal state to stopped
self._service_running = False
self._save_service_state()
# Try to clear all iptables rules (may fail in Docker without privileges)
try:
subprocess.run(['iptables', '-t', 'nat', '-F'],
check=True, timeout=10)
subprocess.run(['iptables', '-F'],
check=True, timeout=10)
except (subprocess.CalledProcessError, FileNotFoundError) as e:
logger.warning(f"Could not clear iptables rules: {e}")
# Continue anyway - service is considered stopped
# Try to disable IP forwarding (may fail in Docker without privileges)
try:
subprocess.run(['sysctl', '-w', 'net.ipv4.ip_forward=0'],
check=True, timeout=10)
except (subprocess.CalledProcessError, FileNotFoundError) as e:
logger.warning(f"Could not disable IP forwarding: {e}")
# Continue anyway - service is considered stopped
logger.info("Routing service stopped successfully")
logger.info("Routing service stopped (state only; iptables untouched)")
return True
except Exception as e:
logger.error(f"Failed to stop routing service: {e}")
# Even if system commands fail, we consider the service stopped
+16 -1
View File
@@ -365,6 +365,8 @@ class WireGuardManager(BaseServiceManager):
current_peer['ips'] = line.split('=', 1)[1].strip()
elif line.startswith('PersistentKeepalive'):
current_peer['ka'] = line.split('=', 1)[1].strip()
elif line.startswith('Endpoint'):
current_peer['endpoint'] = line.split('=', 1)[1].strip()
elif line == '' and 'pub' in current_peer:
desired[current_peer['pub']] = current_peer
current_peer = None
@@ -397,6 +399,8 @@ class WireGuardManager(BaseServiceManager):
'peer', pub,
'allowed-ips', p.get('ips', ''),
'persistent-keepalive', p.get('ka', '25')]
if p.get('endpoint'):
args += ['endpoint', p['endpoint']]
subprocess.run(args, capture_output=True, timeout=5)
logger.info(f'wg set applied: {len(desired)} peers')
@@ -483,7 +487,7 @@ class WireGuardManager(BaseServiceManager):
logger.error(f'add_cell_peer: invalid endpoint port: {endpoint!r}')
return False
try:
ipaddress.ip_network(vpn_subnet, strict=False)
remote_net = ipaddress.ip_network(vpn_subnet, strict=False)
except ValueError as e:
logger.error(f'add_cell_peer: invalid vpn_subnet {vpn_subnet!r}: {e}')
return False
@@ -491,6 +495,17 @@ class WireGuardManager(BaseServiceManager):
if any(c.isspace() for c in vpn_subnet):
logger.error(f'add_cell_peer: vpn_subnet contains whitespace: {vpn_subnet!r}')
return False
# Reject subnets that overlap the local WG network — would create a routing blackhole
try:
local_net = ipaddress.ip_network(self._get_configured_network(), strict=False)
if local_net.overlaps(remote_net):
logger.error(
f'add_cell_peer: vpn_subnet {vpn_subnet!r} overlaps local WG network '
f'{str(local_net)!r} — use a distinct subnet on the remote cell'
)
return False
except Exception:
pass
try:
content = self._read_config()
peer_block = (
+238
View File
@@ -160,3 +160,241 @@ class TestCellLinkManagerConnections(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
# ---------------------------------------------------------------------------
# TestAddConnectionAtomicity
# ---------------------------------------------------------------------------
class TestAddConnectionAtomicity(unittest.TestCase):
    """Verify that add_connection rolls back correctly when WG or DNS steps fail."""
    # Fixtures: fresh temp dir per test; wg/nm are mocks from the module-level
    # helpers, so no real wg/CoreDNS commands run here.
    def setUp(self):
        self.test_dir = tempfile.mkdtemp()
        self.wg = _make_wg_mock()
        self.nm = _make_nm_mock()
        self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    def test_wg_fail_does_not_call_dns(self):
        """When add_cell_peer returns False, add_cell_dns_forward must NOT be called."""
        self.wg.add_cell_peer.return_value = False
        with self.assertRaises(RuntimeError):
            self.mgr.add_connection(SAMPLE_INVITE)
        self.nm.add_cell_dns_forward.assert_not_called()
    def test_wg_fail_does_not_persist_link(self):
        """When WG fails, list_connections() must still return [] (nothing persisted)."""
        self.wg.add_cell_peer.return_value = False
        with self.assertRaises(RuntimeError):
            self.mgr.add_connection(SAMPLE_INVITE)
        self.assertEqual(self.mgr.list_connections(), [])
    def test_wg_fail_raises_runtime_error(self):
        """add_connection raises RuntimeError (not some other exception) when WG fails."""
        self.wg.add_cell_peer.return_value = False
        with self.assertRaises(RuntimeError):
            self.mgr.add_connection(SAMPLE_INVITE)
    def test_dns_warning_still_persists_link(self):
        """When DNS returns warnings (not a hard failure), the link IS still saved."""
        # Warnings dict mimics add_cell_dns_forward's soft-failure shape.
        self.nm.add_cell_dns_forward.return_value = {
            'restarted': [],
            'warnings': ['CoreDNS reload timed out'],
        }
        self.mgr.add_connection(SAMPLE_INVITE)
        links = self.mgr.list_connections()
        self.assertEqual(len(links), 1)
        self.assertEqual(links[0]['cell_name'], 'office')
    def test_dns_warning_does_not_raise(self):
        """When DNS returns warnings, add_connection completes without raising."""
        self.nm.add_cell_dns_forward.return_value = {
            'restarted': [],
            'warnings': ['CoreDNS reload timed out'],
        }
        try:
            self.mgr.add_connection(SAMPLE_INVITE)
        except Exception as e:
            self.fail(f"add_connection raised unexpectedly with DNS warnings: {e}")
# ---------------------------------------------------------------------------
# TestAddConnectionPermissions
# ---------------------------------------------------------------------------
class TestAddConnectionPermissions(unittest.TestCase):
    """Verify that inbound_services controls the permissions field on the saved link."""
    def setUp(self):
        self.test_dir = tempfile.mkdtemp()
        self.wg = _make_wg_mock()
        self.nm = _make_nm_mock()
        self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    # Helper: asserts exactly one link exists and returns it.
    def _get_link(self):
        links = self.mgr.list_connections()
        self.assertEqual(len(links), 1)
        return links[0]
    def test_add_with_no_inbound_defaults_all_deny(self):
        """No inbound_services arg → all inbound permissions False."""
        self.mgr.add_connection(SAMPLE_INVITE)
        link = self._get_link()
        inbound = link['permissions']['inbound']
        for service, allowed in inbound.items():
            self.assertFalse(allowed, f"Expected {service} to be False, got {allowed}")
    def test_add_with_inbound_services_sets_them(self):
        """inbound_services=['calendar','files'] → those two True, others False."""
        self.mgr.add_connection(SAMPLE_INVITE, inbound_services=['calendar', 'files'])
        link = self._get_link()
        inbound = link['permissions']['inbound']
        self.assertTrue(inbound['calendar'])
        self.assertTrue(inbound['files'])
        self.assertFalse(inbound['mail'])
        self.assertFalse(inbound['webdav'])
    def test_inbound_invalid_service_ignored(self):
        """Passing 'badservice' in inbound_services does not appear in permissions."""
        self.mgr.add_connection(SAMPLE_INVITE, inbound_services=['badservice', 'calendar'])
        link = self._get_link()
        inbound = link['permissions']['inbound']
        self.assertNotIn('badservice', inbound)
        # valid one was still applied
        self.assertTrue(inbound['calendar'])
# ---------------------------------------------------------------------------
# TestUpdatePermissions
# ---------------------------------------------------------------------------
class TestUpdatePermissions(unittest.TestCase):
    """Tests for the new update_permissions / get_permissions methods."""
    def setUp(self):
        self.test_dir = tempfile.mkdtemp()
        self.wg = _make_wg_mock()
        self.nm = _make_nm_mock()
        self.mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
        # Add a connection so there is something to update
        self.mgr.add_connection(SAMPLE_INVITE)
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    def test_update_sets_inbound_values(self):
        """update_permissions with inbound={'calendar': True} persists correctly."""
        # create=True because firewall_manager may not exist as a module attr.
        with patch('cell_link_manager.firewall_manager', create=True) as mock_fm:
            mock_fm.apply_cell_rules = MagicMock()
            self.mgr.update_permissions('office', {'calendar': True}, {})
        # Re-read from disk to confirm persistence
        mgr2 = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
        perms = mgr2.get_permissions('office')
        self.assertTrue(perms['inbound']['calendar'])
        self.assertFalse(perms['inbound']['files'])
    def test_update_rejects_unknown_service_by_cleaning_it_out(self):
        """update_permissions with inbound={'bad': True} — 'bad' must not appear in saved perms."""
        with patch('cell_link_manager.firewall_manager', create=True) as mock_fm:
            mock_fm.apply_cell_rules = MagicMock()
            self.mgr.update_permissions('office', {'bad': True, 'calendar': True}, {})
        perms = self.mgr.get_permissions('office')
        self.assertNotIn('bad', perms['inbound'])
        self.assertTrue(perms['inbound']['calendar'])
    def test_update_nonexistent_cell_raises(self):
        """update_permissions on an unknown cell_name raises ValueError."""
        with self.assertRaises(ValueError):
            self.mgr.update_permissions('nosuchcell', {}, {})
    def test_get_permissions_returns_correct(self):
        """get_permissions returns the dict that was saved by update_permissions."""
        with patch('cell_link_manager.firewall_manager', create=True) as mock_fm:
            mock_fm.apply_cell_rules = MagicMock()
            self.mgr.update_permissions(
                'office',
                inbound={'calendar': True, 'files': False},
                outbound={'mail': True},
            )
        perms = self.mgr.get_permissions('office')
        self.assertIn('inbound', perms)
        self.assertIn('outbound', perms)
        self.assertTrue(perms['inbound']['calendar'])
        self.assertFalse(perms['inbound']['files'])
        self.assertTrue(perms['outbound']['mail'])
    def test_get_permissions_nonexistent_cell_raises(self):
        """get_permissions on an unknown cell_name raises ValueError."""
        with self.assertRaises(ValueError):
            self.mgr.get_permissions('nosuchcell')
# ---------------------------------------------------------------------------
# TestLoadMigration
# ---------------------------------------------------------------------------
class TestLoadMigration(unittest.TestCase):
    """Verify _load() lazily injects permissions field when it is missing."""
    def setUp(self):
        self.test_dir = tempfile.mkdtemp()
        self.wg = _make_wg_mock()
        self.nm = _make_nm_mock()
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    def test_load_injects_permissions_if_missing(self):
        """Write cell_links.json without permissions; _load should add all-False defaults."""
        links_file = os.path.join(self.test_dir, 'cell_links.json')
        legacy_links = [
            {
                'cell_name': 'legacy-office',
                'public_key': 'officepubkey=',
                'vpn_subnet': '10.1.0.0/24',
                'dns_ip': '10.1.0.1',
                'domain': 'legacy-office.cell',
                # NO 'permissions' key — simulates pre-migration data
            }
        ]
        with open(links_file, 'w') as f:
            json.dump(legacy_links, f)
        # Construct the manager AFTER writing the file so _load sees legacy data.
        mgr = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
        links = mgr.list_connections()
        self.assertEqual(len(links), 1)
        link = links[0]
        self.assertIn('permissions', link)
        perms = link['permissions']
        self.assertIn('inbound', perms)
        self.assertIn('outbound', perms)
        for service in ('calendar', 'files', 'mail', 'webdav'):
            self.assertFalse(perms['inbound'][service])
            self.assertFalse(perms['outbound'][service])
    def test_load_migration_persists_to_disk(self):
        """After migration, re-loading the same file returns the injected permissions."""
        links_file = os.path.join(self.test_dir, 'cell_links.json')
        with open(links_file, 'w') as f:
            json.dump([{
                'cell_name': 'old-cell',
                'public_key': 'somepubkey=',
                'vpn_subnet': '10.2.0.0/24',
                'dns_ip': '10.2.0.1',
                'domain': 'old-cell.cell',
            }], f)
        mgr1 = CellLinkManager(self.test_dir, self.test_dir, self.wg, self.nm)
        mgr1.list_connections()  # triggers migration + save
        # Read the file directly and confirm permissions are now on disk
        with open(links_file) as f:
            raw = json.load(f)
        self.assertIn('permissions', raw[0])
+225
View File
@@ -291,5 +291,230 @@ class TestGetCellConnectionStatus(unittest.TestCase):
self.assertIn('error', json.loads(r.data))
class TestAddCellRuntimeError(unittest.TestCase):
    """POST /api/cells — RuntimeError from the manager must now return 400, not 500."""
    def setUp(self):
        app.config['TESTING'] = True
        self.client = app.test_client()
    @patch('app.cell_link_manager')
    def test_add_cell_runtime_error_returns_400(self, mock_clm):
        """When add_connection raises RuntimeError (WG failure), endpoint returns 400."""
        mock_clm.add_connection.side_effect = RuntimeError('Failed to add WireGuard peer')
        r = self.client.post(
            '/api/cells',
            data=json.dumps(_VALID_CELL_BODY),
            content_type='application/json',
        )
        self.assertEqual(r.status_code, 400)
        data = json.loads(r.data)
        self.assertIn('error', data)
    @patch('app.cell_link_manager')
    def test_add_cell_runtime_error_body_contains_message(self, mock_clm):
        """The 400 response for a RuntimeError includes the error message."""
        mock_clm.add_connection.side_effect = RuntimeError('WireGuard peer add failed')
        r = self.client.post(
            '/api/cells',
            data=json.dumps(_VALID_CELL_BODY),
            content_type='application/json',
        )
        data = json.loads(r.data)
        # Message from the raised RuntimeError must surface in the JSON body.
        self.assertIn('WireGuard', data['error'])
class TestListServices(unittest.TestCase):
    """GET /api/cells/services"""
    def setUp(self):
        app.config['TESTING'] = True
        self.client = app.test_client()
    def test_list_services_returns_200(self):
        """GET /api/cells/services returns HTTP 200."""
        r = self.client.get('/api/cells/services')
        self.assertEqual(r.status_code, 200)
    def test_list_services_returns_services_key(self):
        """Response body has a 'services' key."""
        r = self.client.get('/api/cells/services')
        data = json.loads(r.data)
        self.assertIn('services', data)
    def test_list_services_returns_list(self):
        """'services' value is a non-empty list."""
        r = self.client.get('/api/cells/services')
        data = json.loads(r.data)
        self.assertIsInstance(data['services'], list)
        self.assertGreater(len(data['services']), 0)
    def test_list_services_includes_known_services(self):
        """'services' includes the four known shareable services."""
        r = self.client.get('/api/cells/services')
        services = json.loads(r.data)['services']
        for expected in ('calendar', 'files', 'mail', 'webdav'):
            self.assertIn(expected, services)
class TestGetCellPermissions(unittest.TestCase):
    """GET /api/cells/<name>/permissions"""
    def setUp(self):
        app.config['TESTING'] = True
        self.client = app.test_client()
    @patch('app.cell_link_manager')
    def test_get_permissions_returns_200(self, mock_clm):
        """GET /api/cells/office/permissions returns 200 when cell exists."""
        mock_clm.get_permissions.return_value = {
            'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False},
            'outbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False},
        }
        r = self.client.get('/api/cells/office/permissions')
        self.assertEqual(r.status_code, 200)
    @patch('app.cell_link_manager')
    def test_get_permissions_response_has_inbound_and_outbound(self, mock_clm):
        """Response body contains 'inbound' and 'outbound' keys."""
        mock_clm.get_permissions.return_value = {
            'inbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False},
            'outbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False},
        }
        r = self.client.get('/api/cells/office/permissions')
        data = json.loads(r.data)
        self.assertIn('inbound', data)
        self.assertIn('outbound', data)
    @patch('app.cell_link_manager')
    def test_get_permissions_unknown_cell_returns_404(self, mock_clm):
        """ValueError from get_permissions maps to 404."""
        mock_clm.get_permissions.side_effect = ValueError('cell not found')
        r = self.client.get('/api/cells/nosuchcell/permissions')
        self.assertEqual(r.status_code, 404)
        self.assertIn('error', json.loads(r.data))
    @patch('app.cell_link_manager')
    def test_get_permissions_passes_cell_name(self, mock_clm):
        """The cell_name URL segment is forwarded to get_permissions."""
        mock_clm.get_permissions.return_value = {'inbound': {}, 'outbound': {}}
        self.client.get('/api/cells/faraway/permissions')
        mock_clm.get_permissions.assert_called_once_with('faraway')
class TestUpdateCellPermissions(unittest.TestCase):
    """PUT /api/cells/<name>/permissions"""

    def setUp(self):
        app.config['TESTING'] = True
        self.client = app.test_client()

    # Canonical valid request body: calendar shared inbound, everything else off.
    _VALID_PERM_BODY = {
        'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False},
        'outbound': {'calendar': False, 'files': False, 'mail': False, 'webdav': False},
    }

    # NOTE: stacked @patch decorators are applied bottom-up, so the mock
    # arguments arrive innermost-first: config_manager, firewall_manager,
    # peer_registry, cell_link_manager.
    @patch('app.cell_link_manager')
    @patch('app.peer_registry')
    @patch('app.firewall_manager')
    @patch('app.config_manager')
    def test_update_permissions_returns_200(self, mock_cfg, mock_fm, mock_pr, mock_clm):
        """PUT with valid inbound/outbound returns 200."""
        mock_cfg.configs = {'_identity': {'domain': 'cell'}}
        mock_pr.list_peers.return_value = []
        mock_clm.list_connections.return_value = []
        mock_clm.update_permissions.return_value = {
            'cell_name': 'office',
            'permissions': self._VALID_PERM_BODY,
        }
        mock_fm.apply_all_dns_rules.return_value = True
        r = self.client.put(
            '/api/cells/office/permissions',
            data=json.dumps(self._VALID_PERM_BODY),
            content_type='application/json',
        )
        self.assertEqual(r.status_code, 200)
        data = json.loads(r.data)
        self.assertIn('message', data)
        self.assertIn('link', data)

    @patch('app.cell_link_manager')
    def test_update_permissions_unknown_service_returns_400(self, mock_clm):
        """PUT body containing an unknown service name returns 400."""
        body = {
            'inbound': {'bad_service': True, 'calendar': True},
            'outbound': {},
        }
        r = self.client.put(
            '/api/cells/office/permissions',
            data=json.dumps(body),
            content_type='application/json',
        )
        self.assertEqual(r.status_code, 400)
        data = json.loads(r.data)
        self.assertIn('error', data)
        # update_permissions should NOT have been called when validation fails
        mock_clm.update_permissions.assert_not_called()

    @patch('app.cell_link_manager')
    def test_update_permissions_unknown_cell_returns_404(self, mock_clm):
        """ValueError from update_permissions (cell not found) maps to 404."""
        mock_clm.update_permissions.side_effect = ValueError('cell not found')
        r = self.client.put(
            '/api/cells/nosuchcell/permissions',
            data=json.dumps(self._VALID_PERM_BODY),
            content_type='application/json',
        )
        self.assertEqual(r.status_code, 404)
        self.assertIn('error', json.loads(r.data))

    @patch('app.cell_link_manager')
    def test_update_permissions_no_body_returns_400(self, mock_clm):
        """PUT with no JSON body returns 400."""
        r = self.client.put('/api/cells/office/permissions')
        self.assertEqual(r.status_code, 400)
        self.assertIn('error', json.loads(r.data))
        mock_clm.update_permissions.assert_not_called()

    @patch('app.cell_link_manager')
    def test_update_permissions_outbound_unknown_service_returns_400(self, mock_clm):
        """Unknown service in outbound (not just inbound) also returns 400."""
        body = {
            'inbound': {'calendar': True},
            'outbound': {'hacked': True},
        }
        r = self.client.put(
            '/api/cells/office/permissions',
            data=json.dumps(body),
            content_type='application/json',
        )
        self.assertEqual(r.status_code, 400)

    @patch('app.cell_link_manager')
    @patch('app.peer_registry')
    @patch('app.firewall_manager')
    @patch('app.config_manager')
    def test_update_permissions_passes_inbound_outbound_to_manager(
            self, mock_cfg, mock_fm, mock_pr, mock_clm):
        """update_permissions is called with inbound and outbound dicts from the body."""
        mock_cfg.configs = {'_identity': {'domain': 'cell'}}
        mock_pr.list_peers.return_value = []
        mock_clm.list_connections.return_value = []
        mock_clm.update_permissions.return_value = {
            'cell_name': 'office', 'permissions': self._VALID_PERM_BODY
        }
        mock_fm.apply_all_dns_rules.return_value = True
        self.client.put(
            '/api/cells/office/permissions',
            data=json.dumps(self._VALID_PERM_BODY),
            content_type='application/json',
        )
        # Positional call shape must match the route's invocation exactly.
        mock_clm.update_permissions.assert_called_once_with(
            'office',
            self._VALID_PERM_BODY['inbound'],
            self._VALID_PERM_BODY['outbound'],
        )
# Allow running this test module directly (python test_file.py) as well as via pytest.
if __name__ == '__main__':
    unittest.main()
+7 -3
View File
@@ -65,18 +65,22 @@ class TestConfigApplyRoute(unittest.TestCase):
data = json.loads(r.data)
self.assertTrue(data.get('restart_in_progress'))
# ── Pending state cleared after apply ─────────────────────────────────
# ── Pending state marked "applying" after apply (not immediately cleared)
@patch('threading.Thread')
@patch('docker.from_env')
def test_apply_clears_pending_state(self, mock_docker, mock_thread):
def test_apply_sets_applying_flag(self, mock_docker, mock_thread):
mock_docker.side_effect = Exception('no docker in test')
# Don't actually start the thread so we don't need subprocess
mock_thread.return_value = MagicMock()
_set_pending_restart(['config changed'], ['*'])
self.client.post('/api/config/apply')
pending = config_manager.configs.get('_pending_restart', {})
self.assertFalse(pending.get('needs_restart', False))
# The route now marks needs_restart=True + applying=True instead of clearing
# immediately. The helper container clears the flag on success; if the helper
# fails, needs_restart stays set so the UI continues showing pending changes.
self.assertTrue(pending.get('needs_restart', False))
self.assertTrue(pending.get('applying', False))
# ── needs_network_recreate=True → helper script includes 'down' ────────
+216
View File
@@ -406,5 +406,221 @@ class TestUpdateServiceIps(unittest.TestCase):
self.assertNotIn('172.20.0.21', dest_ips)
# ---------------------------------------------------------------------------
# TestCellRules
# ---------------------------------------------------------------------------
class TestCellRules(unittest.TestCase):
    """Tests for apply_cell_rules, clear_cell_rules, _cell_tag, and apply_all_cell_rules."""

    # ── helpers ───────────────────────────────────────────────────────────────
    def _capture_apply(self, cell_name, vpn_subnet, inbound_services):
        """Run apply_cell_rules with _wg_exec mocked; return list of captured iptables arg lists."""
        calls_made = []

        def fake_wg_exec(args):
            # Record every argv the manager would execute, then fake success.
            calls_made.append(args)
            m = MagicMock()
            m.returncode = 0
            m.stdout = ''
            return m
        with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec):
            firewall_manager.apply_cell_rules(cell_name, vpn_subnet, inbound_services)
        # Keep only invocations whose argv mentions iptables (drop any other exec).
        return [c for c in calls_made if 'iptables' in c]

    def _targets_for_dest(self, iptables_calls, dest_ip):
        """Return list of -j targets where -d matches dest_ip."""
        targets = []
        for c in iptables_calls:
            if '-d' in c and dest_ip in c and '-j' in c:
                targets.append(c[c.index('-j') + 1])
        return targets

    # ── _cell_tag ─────────────────────────────────────────────────────────────
    def test_cell_tag_sanitises_spaces_and_punctuation(self):
        """_cell_tag replaces non-alphanumeric chars with dashes."""
        tag = firewall_manager._cell_tag('my cell!')
        self.assertTrue(tag.startswith('pic-cell-'))
        self.assertNotIn(' ', tag)
        self.assertNotIn('!', tag)

    def test_cell_tag_lowercase(self):
        """_cell_tag lowercases the cell name."""
        tag = firewall_manager._cell_tag('Office')
        self.assertIn('office', tag)

    def test_cell_tag_has_pic_cell_prefix(self):
        """_cell_tag always starts with 'pic-cell-'."""
        self.assertTrue(firewall_manager._cell_tag('remote').startswith('pic-cell-'))

    def test_cell_tag_distinct_from_peer_tag(self):
        """A cell tag must not equal the peer comment for the same string."""
        # Guards against clear_cell_rules accidentally deleting peer rules.
        cell_tag = firewall_manager._cell_tag('10.0.0.2')
        peer_tag = firewall_manager._peer_comment('10.0.0.2')
        self.assertNotEqual(cell_tag, peer_tag)

    # ── apply_cell_rules — catch-all DROP ─────────────────────────────────────
    def test_apply_cell_rules_sends_catch_all_drop(self):
        """apply_cell_rules always inserts a DROP for the entire vpn_subnet."""
        calls = self._capture_apply('office', '10.0.1.0/24', ['calendar'])
        subnet_drops = [
            c for c in calls
            if '-s' in c and '10.0.1.0/24' in c
            and '-j' in c and c[c.index('-j') + 1] == 'DROP'
            and '-d' not in c  # catch-all has no destination
        ]
        self.assertTrue(subnet_drops, "Expected a catch-all DROP rule for the subnet")

    def test_apply_cell_rules_sends_accept_for_allowed_service(self):
        """apply_cell_rules inserts ACCEPT for the calendar VIP when calendar is in inbound."""
        calls = self._capture_apply('office', '10.0.1.0/24', ['calendar'])
        calendar_ip = firewall_manager.SERVICE_IPS['calendar']
        calendar_targets = self._targets_for_dest(calls, calendar_ip)
        self.assertIn('ACCEPT', calendar_targets)

    def test_apply_cell_rules_sends_drop_for_disallowed_service(self):
        """apply_cell_rules inserts DROP for a service not in inbound_services."""
        calls = self._capture_apply('office', '10.0.1.0/24', ['calendar'])
        files_ip = firewall_manager.SERVICE_IPS['files']
        files_targets = self._targets_for_dest(calls, files_ip)
        self.assertIn('DROP', files_targets)

    # ── apply_cell_rules — empty inbound (all-deny) ───────────────────────────
    def test_apply_cell_rules_empty_inbound_all_drop(self):
        """With inbound_services=[], all per-service rules are DROP."""
        calls = self._capture_apply('office', '10.0.1.0/24', [])
        for service, svc_ip in firewall_manager.SERVICE_IPS.items():
            svc_targets = self._targets_for_dest(calls, svc_ip)
            self.assertTrue(svc_targets,
                            f"Expected at least one rule for {service} ({svc_ip})")
            self.assertNotIn('ACCEPT', svc_targets,
                             f"{service} should be DROP when not in inbound_services")

    # ── apply_cell_rules — all inbound (all-accept) ───────────────────────────
    def test_apply_cell_rules_all_inbound_all_accept(self):
        """With all four services in inbound, all per-service rules are ACCEPT."""
        all_services = list(firewall_manager.SERVICE_IPS.keys())
        calls = self._capture_apply('office', '10.0.1.0/24', all_services)
        for service, svc_ip in firewall_manager.SERVICE_IPS.items():
            svc_targets = self._targets_for_dest(calls, svc_ip)
            self.assertIn('ACCEPT', svc_targets,
                          f"{service} should be ACCEPT when in inbound_services")

    # ── apply_cell_rules — all rules tagged ───────────────────────────────────
    def test_apply_cell_rules_all_rules_tagged_with_cell_tag(self):
        """Every insertion rule must carry the cell's comment tag."""
        # The tag is what clear_cell_rules later greps for, so an untagged
        # rule would leak forever.
        calls = self._capture_apply('office', '10.0.1.0/24', ['calendar'])
        tag = firewall_manager._cell_tag('office')
        for c in calls:
            if '-I' in c:
                self.assertIn(tag, c, f"Rule missing cell tag: {c}")

    # ── clear_cell_rules — noop when no matching rules ────────────────────────
    def test_clear_cell_rules_noop_when_no_rules(self):
        """When iptables-save returns no pic-cell-office lines, iptables-restore is NOT called."""
        save_output = '*filter\n:FORWARD ACCEPT [0:0]\nCOMMIT\n'

        def fake_wg_exec(args):
            m = MagicMock()
            m.returncode = 0
            m.stdout = save_output
            return m
        with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \
                patch('subprocess.run') as mock_restore:
            firewall_manager.clear_cell_rules('office')
        mock_restore.assert_not_called()

    def test_clear_cell_rules_removes_tagged_lines(self):
        """clear_cell_rules removes lines carrying the cell tag and keeps others."""
        tag = firewall_manager._cell_tag('office')
        save_output = (
            '*filter\n'
            ':FORWARD ACCEPT [0:0]\n'
            f'-A FORWARD -s 10.0.1.0/24 -m comment --comment "{tag}" -j DROP\n'
            '-A FORWARD -s 10.0.0.2 -m comment --comment "pic-peer-10-0-0-2/32" -j ACCEPT\n'
            'COMMIT\n'
        )
        restored = []

        def fake_wg_exec(args):
            m = MagicMock()
            m.returncode = 0
            if args == ['iptables-save']:
                m.stdout = save_output
            return m

        def fake_restore(cmd, input, **kwargs):
            # Capture the ruleset fed to iptables-restore via stdin.
            restored.append(input)
            m = MagicMock()
            m.returncode = 0
            return m
        with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \
                patch('subprocess.run', side_effect=fake_restore):
            firewall_manager.clear_cell_rules('office')
        self.assertEqual(len(restored), 1)
        content = restored[0]
        self.assertNotIn(tag, content)
        # peer rule for a different entity must survive
        self.assertIn('pic-peer-10-0-0-2/32', content)

    # ── apply_all_cell_rules ──────────────────────────────────────────────────
    def test_apply_all_cell_rules_calls_apply_for_each(self):
        """apply_all_cell_rules calls apply_cell_rules once per link with correct args."""
        cell_links = [
            {
                'cell_name': 'office',
                'vpn_subnet': '10.1.0.0/24',
                'permissions': {'inbound': {'calendar': True, 'files': False, 'mail': False, 'webdav': False},
                                'outbound': {}},
            },
            {
                'cell_name': 'cabin',
                'vpn_subnet': '10.2.0.0/24',
                'permissions': {'inbound': {'calendar': False, 'files': True, 'mail': False, 'webdav': False},
                                'outbound': {}},
            },
        ]
        with patch.object(firewall_manager, 'apply_cell_rules', return_value=True) as mock_apply:
            firewall_manager.apply_all_cell_rules(cell_links)
        self.assertEqual(mock_apply.call_count, 2)
        # Index positional args by cell name so assertion order doesn't matter.
        call_kwargs = {c.args[0]: c.args for c in mock_apply.call_args_list}
        self.assertIn('office', call_kwargs)
        self.assertIn('cabin', call_kwargs)
        office_args = call_kwargs['office']
        self.assertEqual(office_args[1], '10.1.0.0/24')
        # Only services flagged True in inbound should be forwarded.
        self.assertIn('calendar', office_args[2])
        self.assertNotIn('files', office_args[2])

    def test_apply_all_cell_rules_skips_links_with_missing_fields(self):
        """Links without cell_name or vpn_subnet are silently skipped."""
        cell_links = [
            {'vpn_subnet': '10.1.0.0/24'},  # no cell_name
            {'cell_name': 'broken'},  # no vpn_subnet
            {'cell_name': 'office', 'vpn_subnet': '10.3.0.0/24',
             'permissions': {'inbound': {}, 'outbound': {}}},
        ]
        with patch.object(firewall_manager, 'apply_cell_rules', return_value=True) as mock_apply:
            firewall_manager.apply_all_cell_rules(cell_links)
        # Only the complete entry should be processed
        self.assertEqual(mock_apply.call_count, 1)
        self.assertEqual(mock_apply.call_args.args[0], 'office')
# Allow running this test module directly (python test_file.py) as well as via pytest.
if __name__ == '__main__':
    unittest.main()
+2 -2
View File
@@ -31,7 +31,7 @@ class TestAddPeerSubnetExhaustion(unittest.TestCase):
app.config['TESTING'] = True
self.client = app.test_client()
@patch('app._next_peer_ip')
@patch('routes.peers._next_peer_ip')
@patch('app.auth_manager')
def test_add_peer_returns_409_when_subnet_exhausted(self, mock_auth, mock_next_ip):
mock_auth.create_user.return_value = True
@@ -50,7 +50,7 @@ class TestAddPeerSubnetExhaustion(unittest.TestCase):
data = json.loads(r.data)
self.assertIn('error', data)
@patch('app._next_peer_ip')
@patch('routes.peers._next_peer_ip')
@patch('app.auth_manager')
def test_add_peer_409_error_message_mentions_ip(self, mock_auth, mock_next_ip):
mock_auth.create_user.return_value = True
+47
View File
@@ -370,3 +370,50 @@ def test_delete_nonexistent_peer_returns_gracefully(admin_client, mock_peer_regi
r = _delete_peer(admin_client, 'nobody')
# Route must not 500 when the peer simply doesn't exist
assert r.status_code in (200, 404)
# ── POST /api/peers — firewall rollback (A3) ──────────────────────────────────
def test_create_peer_rolls_back_firewall_on_dns_failure(
        auth_mgr, mock_email_mgr, mock_calendar_mgr,
        mock_file_mgr, mock_wg_mgr, mock_peer_registry):
    """If apply_all_dns_rules raises after firewall rules were applied, the peer
    add must call clear_peer_rules to undo the firewall state (A3 fix)."""
    app.config['TESTING'] = True
    app.config['SECRET_KEY'] = 'test-secret'
    # Firewall mock: per-peer rules succeed, then DNS propagation blows up —
    # this is the mid-flight failure that must trigger rollback.
    mock_fw = MagicMock()
    mock_fw.apply_peer_rules.return_value = True
    mock_fw.apply_all_dns_rules.side_effect = RuntimeError('CoreDNS unreachable')
    patches = [
        patch('app.auth_manager', auth_mgr),
        patch('app.email_manager', mock_email_mgr),
        patch('app.calendar_manager', mock_calendar_mgr),
        patch('app.file_manager', mock_file_mgr),
        patch('app.wireguard_manager', mock_wg_mgr),
        patch('app.peer_registry', mock_peer_registry),
        patch('app.firewall_manager', mock_fw),
    ]
    # auth_routes is optional (blueprint refactor); patch its module-level
    # auth_manager too when the module is importable.
    try:
        import auth_routes
        patches.append(patch.object(auth_routes, 'auth_manager', auth_mgr, create=True))
    except (ImportError, AttributeError):
        pass
    for p in patches:
        p.start()
    try:
        with app.test_client() as client:
            r = _login(client)
            assert r.status_code == 200
            resp = _post_peer(client)
            assert resp.status_code == 500, (
                f'expected 500 on DNS failure but got {resp.status_code}'
            )
            # Firewall rules must be cleared as part of rollback
            mock_fw.clear_peer_rules.assert_called_once()
            # Registry entry must also be rolled back
            mock_peer_registry.remove_peer.assert_called_once()
    finally:
        # Always undo the patches, even if an assertion above fails.
        for p in patches:
            p.stop()
+6 -6
View File
@@ -238,7 +238,7 @@ class TestWireGuardPortPropagation(unittest.TestCase):
app.config['TESTING'] = True
self.client = app.test_client()
@patch('app._set_pending_restart')
@patch('routes.config._set_pending_restart')
@patch('app.wireguard_manager')
@patch('app.config_manager')
def test_wireguard_port_identity_change_calls_apply_config(
@@ -263,7 +263,7 @@ class TestWireGuardPortPropagation(unittest.TestCase):
self.assertEqual(r.status_code, 200)
mock_wg.apply_config.assert_called_once_with({'port': 51821})
@patch('app._set_pending_restart')
@patch('routes.config._set_pending_restart')
@patch('app.wireguard_manager')
@patch('app.config_manager')
def test_wireguard_port_same_value_does_not_call_apply_config(
@@ -305,7 +305,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase):
app.config['TESTING'] = True
self.client = app.test_client()
@patch('app._clear_pending_restart')
@patch('routes.config._clear_pending_restart')
@patch('app.config_manager')
def test_apply_pending_uses_force_recreate(self, mock_cm, mock_clear):
"""apply_pending_config for specific containers must include --force-recreate."""
@@ -325,7 +325,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase):
t.start = lambda: None
return t
with patch('app.threading.Thread', side_effect=patched_thread):
with patch('routes.config.threading.Thread', side_effect=patched_thread):
r = self.client.post('/api/config/apply')
self.assertEqual(r.status_code, 200)
@@ -344,7 +344,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase):
f'--force-recreate missing from docker compose command: {cmd}')
self.assertIn('wireguard', cmd)
@patch('app._clear_pending_restart')
@patch('routes.config._clear_pending_restart')
@patch('app.config_manager')
def test_apply_pending_all_services_no_force_recreate(self, mock_cm, mock_clear):
"""All-services restart ('*') uses a helper container (Popen), not subprocess.run."""
@@ -364,7 +364,7 @@ class TestApplyPendingConfigForceRecreate(unittest.TestCase):
t.start = lambda: None
return t
with patch('app.threading.Thread', side_effect=patched_thread):
with patch('routes.config.threading.Thread', side_effect=patched_thread):
r = self.client.post('/api/config/apply')
self.assertEqual(r.status_code, 200)
+75
View File
@@ -629,5 +629,80 @@ class TestWireGuardSysctlAndPortCheck(unittest.TestCase):
self.assertEqual(statuses, {})
class TestAddCellPeerSubnetOverlap(unittest.TestCase):
    """Verify that add_cell_peer rejects a vpn_subnet that overlaps the local WG network."""

    def setUp(self):
        # Isolated temp dirs so the manager never touches real state.
        self.test_dir = tempfile.mkdtemp()
        self.data_dir = os.path.join(self.test_dir, 'data')
        self.config_dir = os.path.join(self.test_dir, 'config')
        os.makedirs(self.data_dir, exist_ok=True)
        os.makedirs(self.config_dir, exist_ok=True)
        # Stub out the live `wg syncconf` call — no WireGuard in the test env.
        patcher = patch.object(WireGuardManager, '_syncconf', return_value=None)
        self.mock_sync = patcher.start()
        self.addCleanup(patcher.stop)
        self.wg = WireGuardManager(self.data_dir, self.config_dir)
        # Write a known wg0.conf so _get_configured_network() returns 10.0.0.0/24
        self._write_wg_conf(address='10.0.0.1/24')

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def _write_wg_conf(self, address='10.0.0.1/24', port=51820):
        # Minimal [Interface] section — just enough for the overlap check
        # to derive the local network from the Address line.
        conf = (
            f'[Interface]\n'
            f'PrivateKey = dummykey\n'
            f'Address = {address}\n'
            f'ListenPort = {port}\n'
        )
        cf = self.wg._config_file()
        os.makedirs(os.path.dirname(cf), exist_ok=True)
        with open(cf, 'w') as f:
            f.write(conf)

    # Public key is 44 chars ending in '=' — required by validation in add_cell_peer
    _CELL_PUBKEY = 'cmVtb3RlcHVia2V5X2Zvcl90ZXN0c193Z3Rlc3QxMiE='

    def test_add_cell_peer_overlapping_subnet_returns_false(self):
        """vpn_subnet that exactly matches the local WG network must be rejected."""
        # local is 10.0.0.0/24; remote is also 10.0.0.0/24 — clear overlap
        ok = self.wg.add_cell_peer(
            'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.0.0/24'
        )
        self.assertFalse(ok)

    def test_add_cell_peer_partially_overlapping_subnet_returns_false(self):
        """A remote subnet that contains the local network (e.g. /16 ⊃ /24) is rejected."""
        # 10.0.0.0/16 contains 10.0.0.0/24 → overlaps
        ok = self.wg.add_cell_peer(
            'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.0.0/16'
        )
        self.assertFalse(ok)

    def test_add_cell_peer_non_overlapping_subnet_accepted(self):
        """A remote subnet distinct from the local WG network must be accepted."""
        # local is 10.0.0.0/24; remote is 10.0.1.0/24 — no overlap
        ok = self.wg.add_cell_peer(
            'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '10.0.1.0/24'
        )
        self.assertTrue(ok)

    def test_add_cell_peer_no_overlap_different_class_a(self):
        """A completely different address space is accepted."""
        # local is 10.0.0.0/24; remote is 192.168.5.0/24 — no overlap
        ok = self.wg.add_cell_peer(
            'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '192.168.5.0/24'
        )
        self.assertTrue(ok)

    def test_add_cell_peer_overlap_check_uses_configured_network(self):
        """When wg0.conf says 172.16.0.1/12, overlapping that range is rejected."""
        self._write_wg_conf(address='172.16.0.1/12')
        ok = self.wg.add_cell_peer(
            'remote', self._CELL_PUBKEY, '5.6.7.8:51821', '172.16.0.0/12'
        )
        self.assertFalse(ok)
# Allow running this test module directly (python test_file.py) as well as via pytest.
if __name__ == '__main__':
    unittest.main()
+257 -39
View File
@@ -1,9 +1,25 @@
import { useState, useEffect } from 'react';
import { Link2, Link2Off, Copy, CheckCheck, RefreshCw, Plug, Unplug, Globe, Wifi } from 'lucide-react';
import { Link2, Link2Off, Copy, CheckCheck, RefreshCw, Plug, Unplug, Globe, Wifi, Calendar, FolderOpen, Mail, HardDrive, ChevronDown, ChevronRight } from 'lucide-react';
import { cellLinkAPI } from '../services/api';
import { useConfig } from '../contexts/ConfigContext';
import QRCode from 'qrcode';
// Convert an epoch-seconds number (or a date string) into a short
// human-readable age: "5s ago", "3m ago", "2h ago", "4d ago".
// Returns null for a missing/zero timestamp.
const relativeTime = (ts) => {
  if (!ts) return null;
  const tsSeconds = typeof ts === 'string' ? new Date(ts).getTime() / 1000 : ts;
  // Clamp at 0: a handshake timestamp slightly in the future (clock skew
  // between the WireGuard host and the browser) would otherwise render
  // a negative age like "-3s ago".
  const diff = Math.max(0, Math.floor((Date.now() / 1000) - tsSeconds));
  if (diff < 60) return `${diff}s ago`;
  if (diff < 3600) return `${Math.floor(diff / 60)}m ago`;
  if (diff < 86400) return `${Math.floor(diff / 3600)}h ago`;
  return `${Math.floor(diff / 86400)}d ago`;
};
// Shareable services with their display metadata. Each `key` matches a
// service name used by the /api/cells permissions backend; `Icon` is a
// lucide-react component rendered next to the label.
const SERVICE_DEFS = [
  { key: 'calendar', label: 'Calendar', Icon: Calendar },
  { key: 'files', label: 'Files', Icon: FolderOpen },
  { key: 'mail', label: 'Mail', Icon: Mail },
  { key: 'webdav', label: 'WebDAV', Icon: HardDrive },
];
function CopyButton({ text, small }) {
const [copied, setCopied] = useState(false);
const copy = () => {
@@ -52,6 +68,194 @@ function useToasts() {
return [toasts, add];
}
// Full-screen confirmation dialog shown before tearing down a cell link.
// Purely presentational: the caller owns the open/closed state and performs
// the actual disconnect in onConfirm.
function DisconnectConfirmModal({ cellName, onConfirm, onCancel }) {
  return (
    <div className="fixed inset-0 z-50 flex items-center justify-center bg-black/40">
      <div className="bg-white rounded-xl shadow-2xl p-6 max-w-sm w-full mx-4">
        <div className="flex items-center gap-3 mb-3">
          <Unplug className="h-6 w-6 text-red-500 flex-shrink-0" />
          <h3 className="text-base font-semibold text-gray-900">Disconnect "{cellName}"?</h3>
        </div>
        <p className="text-sm text-gray-600 mb-2">
          This will remove the WireGuard tunnel and all sharing permissions between your cells.
        </p>
        <p className="text-xs text-gray-400 mb-5">
          The other cell's admin will need to remove the connection on their end too. Shared services will become inaccessible immediately.
        </p>
        <div className="flex gap-3 justify-end">
          {/* autoFocus on Cancel makes Enter a safe default for a destructive dialog */}
          <button onClick={onCancel} autoFocus className="btn btn-secondary text-sm">Cancel</button>
          <button onClick={onConfirm} className="btn btn-danger text-sm flex items-center gap-2">
            <Unplug className="h-4 w-4" /> Disconnect
          </button>
        </div>
      </div>
    </div>
  );
}
// Accessible on/off switch for one shareable service. While `saving` is true
// the control is dimmed and clicks/keys are ignored so a slow PUT can't be
// double-fired. `serviceKey` is accepted for API symmetry with SERVICE_DEFS
// but only `label` is used for display.
function ServiceShareToggle({ serviceKey, label, Icon, enabled, saving, onChange }) {
  return (
    <label className="flex items-center gap-3 cursor-pointer select-none py-1">
      <div
        role="switch"
        aria-checked={enabled}
        aria-label={`Share ${label}`}
        tabIndex={0}
        onClick={() => !saving && onChange(!enabled)}
        onKeyDown={e => (e.key === 'Enter' || e.key === ' ') && !saving && onChange(!enabled)}
        className={`relative w-10 h-5 rounded-full transition-colors focus:outline-none focus:ring-2 focus:ring-primary-400 focus:ring-offset-1 ${
          enabled ? 'bg-primary-500' : 'bg-gray-300'
        } ${saving ? 'opacity-60 cursor-wait' : 'cursor-pointer'}`}
      >
        <span className={`absolute top-0.5 left-0.5 w-4 h-4 bg-white rounded-full shadow transition-transform ${
          enabled ? 'translate-x-5' : ''
        }`} />
      </div>
      <Icon className="h-4 w-4 text-gray-500 shrink-0" />
      <span className="text-sm text-gray-700">{label}</span>
    </label>
  );
}
// Read-only pill showing whether the remote cell shares a given service with
// us (green when active, grey when not). Display-only — inbound sharing is
// controlled by the other cell's admin.
function InboundServiceBadge({ label, Icon, active }) {
  return (
    <span className={`inline-flex items-center gap-1 px-2 py-0.5 rounded text-xs font-medium ${
      active ? 'bg-green-100 text-green-700' : 'bg-gray-100 text-gray-400'
    }`}>
      <Icon className="h-3 w-3" />
      {label}
    </span>
  );
}
// Collapsible panel for one connected cell: online status header, per-service
// sharing toggles ("I share"), read-only badges for what the remote shares,
// and a guarded Disconnect action.
//
// Props:
//   conn         — connection record from the API; assumes it may carry
//                  permissions {inbound, outbound}, online, last_handshake,
//                  vpn_subnet, endpoint (all optional — TODO confirm schema)
//   onDisconnect — called with conn.cell_name after the modal is confirmed
//   addToast     — toast notifier (message, type)
function CellPanel({ conn, onDisconnect, addToast }) {
  const [open, setOpen] = useState(false);
  // Local copy of inbound permissions so toggles update instantly;
  // only committed to state after the PUT succeeds.
  const [inboundPerms, setInboundPerms] = useState(conn.permissions?.inbound || {});
  // Per-service in-flight flags so each toggle disables independently.
  const [saving, setSaving] = useState({});
  const [confirmDisconnect, setConfirmDisconnect] = useState(false);

  const handleToggle = async (serviceKey, newValue) => {
    setSaving(s => ({ ...s, [serviceKey]: true }));
    const newInbound = { ...inboundPerms, [serviceKey]: newValue };
    try {
      await cellLinkAPI.updatePermissions(conn.cell_name, newInbound, conn.permissions?.outbound || {});
      // Only reflect the change locally once the server accepted it.
      setInboundPerms(newInbound);
      addToast(`${serviceKey} sharing ${newValue ? 'enabled' : 'disabled'}`, 'success');
    } catch {
      addToast('Failed to save sharing permission', 'error');
    } finally {
      setSaving(s => ({ ...s, [serviceKey]: false }));
    }
  };

  // True when at least one service is switched off — used to show the
  // "becomes unreachable immediately" hint.
  const hasRevokedService = Object.values(inboundPerms).some(v => !v);

  return (
    <>
      {confirmDisconnect && (
        <DisconnectConfirmModal
          cellName={conn.cell_name}
          onConfirm={() => { setConfirmDisconnect(false); onDisconnect(conn.cell_name); }}
          onCancel={() => setConfirmDisconnect(false)}
        />
      )}
      <div className="border border-gray-100 rounded-lg overflow-hidden">
        {/* Header row — toggles the expanded detail section */}
        <button
          className="w-full flex items-center justify-between px-4 py-3 bg-gray-50 hover:bg-gray-100 transition-colors text-left"
          onClick={() => setOpen(v => !v)}
          aria-expanded={open}
        >
          <div className="flex items-center gap-3 min-w-0">
            <StatusDot online={conn.online} />
            <div className="min-w-0">
              <div className="flex items-center gap-1.5 min-w-0">
                <span className="font-medium text-gray-900 truncate">{conn.cell_name}</span>
                <span className="text-xs text-gray-400 font-mono shrink-0">.{conn.domain}</span>
              </div>
              <div className="flex flex-wrap gap-x-3 text-xs text-gray-500 mt-0.5">
                <span className={conn.online ? 'text-green-600 font-medium' : 'text-gray-400'}>
                  {conn.online ? 'Online' : 'Offline'}
                </span>
                {conn.last_handshake && (
                  <span>{relativeTime(conn.last_handshake)}</span>
                )}
              </div>
            </div>
          </div>
          {open
            ? <ChevronDown className="h-4 w-4 text-gray-400 shrink-0" />
            : <ChevronRight className="h-4 w-4 text-gray-400 shrink-0" />}
        </button>
        {open && (
          <div className="px-4 py-4 bg-white border-t border-gray-100">
            <div className="grid grid-cols-1 sm:grid-cols-2 gap-6">
              {/* Left column: services WE share with the remote cell (editable) */}
              <div>
                <p className="text-xs font-semibold text-gray-500 uppercase tracking-wide mb-3">
                  I share with {conn.cell_name}
                </p>
                <div className="space-y-1">
                  {SERVICE_DEFS.map(({ key, label, Icon }) => (
                    <ServiceShareToggle
                      key={key}
                      serviceKey={key}
                      label={label}
                      Icon={Icon}
                      enabled={!!inboundPerms[key]}
                      saving={!!saving[key]}
                      onChange={v => handleToggle(key, v)}
                    />
                  ))}
                </div>
                {hasRevokedService && (
                  <p className="text-xs text-gray-400 mt-2">
                    Services you stop sharing become unreachable from {conn.cell_name} immediately.
                  </p>
                )}
              </div>
              {/* Right column: services the remote cell shares with us (read-only) */}
              <div>
                <p className="text-xs font-semibold text-gray-500 uppercase tracking-wide mb-3">
                  {conn.cell_name} shares with me
                </p>
                {(conn.permissions?.outbound && Object.values(conn.permissions.outbound).some(Boolean)) ? (
                  <div className="flex flex-wrap gap-1.5">
                    {SERVICE_DEFS.map(({ key, label, Icon }) => (
                      <InboundServiceBadge
                        key={key}
                        label={label}
                        Icon={Icon}
                        active={!!conn.permissions.outbound[key]}
                      />
                    ))}
                  </div>
                ) : (
                  <p className="text-xs text-gray-400">Nothing shared yet.</p>
                )}
                <p className="text-xs text-gray-400 mt-3">
                  Inbound sharing is set by the other cell's admin.
                </p>
              </div>
            </div>
            {/* Footer: connection metadata + destructive action */}
            <div className="mt-4 pt-3 border-t border-gray-100 flex flex-wrap items-center justify-between gap-3">
              <dl className="flex flex-wrap gap-x-4 gap-y-1 text-xs text-gray-500">
                {conn.vpn_subnet && <div><dt className="inline text-gray-400">Subnet </dt><dd className="inline font-mono">{conn.vpn_subnet}</dd></div>}
                {conn.endpoint && <div><dt className="inline text-gray-400">Endpoint </dt><dd className="inline font-mono">{conn.endpoint}</dd></div>}
              </dl>
              <button
                onClick={() => setConfirmDisconnect(true)}
                className="btn btn-danger flex items-center gap-2 text-sm py-1.5"
              >
                <Unplug className="h-4 w-4" />
                Disconnect
              </button>
            </div>
          </div>
        )}
      </div>
    </>
  );
}
export default function CellNetwork() {
const { cell_name = 'mycell', domain = 'cell' } = useConfig();
const [toasts, addToast] = useToasts();
@@ -64,6 +268,7 @@ export default function CellNetwork() {
const [connsLoading, setConnsLoading] = useState(true);
const [pasteText, setPasteText] = useState('');
const [pasteError, setPasteError] = useState('');
const [connecting, setConnecting] = useState(false);
useEffect(() => {
@@ -89,17 +294,29 @@ export default function CellNetwork() {
setConnsLoading(true);
try {
const r = await cellLinkAPI.listConnections();
// Enrich with live status
const enriched = await Promise.all(
(r.data || []).map(async (conn) => {
const conns = r.data || [];
// Fetch all WireGuard peer statuses in one call and index by public_key
let statusByKey = {};
try {
const s = await cellLinkAPI.getStatus(conn.cell_name);
return { ...conn, online: s.data.online, last_handshake: s.data.last_handshake };
const { wireguardAPI } = await import('../services/api');
const sr = await wireguardAPI.getPeerStatuses();
(sr.data?.peers || []).forEach(p => {
if (p.public_key) statusByKey[p.public_key] = p;
});
} catch {
return { ...conn, online: false };
// Status enrichment is best-effort; continue without it
}
})
);
const enriched = conns.map(conn => {
const wg = conn.public_key ? statusByKey[conn.public_key] : null;
return {
...conn,
online: wg ? wg.online : false,
last_handshake: wg ? wg.last_handshake : null,
};
});
setConnections(enriched);
} catch {
addToast('Failed to load connections', 'error');
@@ -108,6 +325,20 @@ export default function CellNetwork() {
}
};
const validatePaste = (text) => {
if (!text.trim()) { setPasteError(''); return; }
try {
const p = JSON.parse(text.trim());
if (!p.cell_name || !p.public_key || !p.vpn_subnet) {
setPasteError('JSON is missing required fields (cell_name, public_key, vpn_subnet)');
} else {
setPasteError('');
}
} catch {
setPasteError('Not valid JSON — paste the complete invite from the other cell');
}
};
const handleConnect = async () => {
if (!pasteText.trim()) return;
let parsed;
@@ -122,6 +353,7 @@ export default function CellNetwork() {
await cellLinkAPI.addConnection(parsed);
addToast(`Connected to cell "${parsed.cell_name}"`);
setPasteText('');
setPasteError('');
loadConnections();
} catch (e) {
addToast(e?.response?.data?.error || 'Connection failed', 'error');
@@ -130,8 +362,8 @@ export default function CellNetwork() {
}
};
// Confirmation is handled inside CellPanel via DisconnectConfirmModal
const handleDisconnect = async (name) => {
if (!window.confirm(`Disconnect from cell "${name}"?`)) return;
try {
await cellLinkAPI.removeConnection(name);
addToast(`Disconnected from "${name}"`);
@@ -230,16 +462,22 @@ export default function CellNetwork() {
<p className="text-sm text-gray-600">
Paste the invite JSON from the other cell's "Your Cell's Invite" panel:
</p>
<div>
<textarea
value={pasteText}
onChange={e => setPasteText(e.target.value)}
onChange={e => { setPasteText(e.target.value); if (pasteError) validatePaste(e.target.value); }}
onBlur={e => validatePaste(e.target.value)}
placeholder={'{\n "cell_name": "...",\n "public_key": "...",\n ...\n}'}
rows={8}
className="w-full text-xs font-mono border rounded-lg p-3 focus:outline-none focus:ring-2 focus:ring-primary-400 resize-none bg-white"
className={`w-full text-xs font-mono border rounded-lg p-3 focus:outline-none focus:ring-2 focus:ring-primary-400 resize-none bg-white ${
pasteError ? 'border-red-400 focus:ring-red-400' : ''
}`}
/>
{pasteError && <p className="text-xs text-red-600 mt-1">{pasteError}</p>}
</div>
<button
onClick={handleConnect}
disabled={connecting || !pasteText.trim()}
disabled={connecting || !pasteText.trim() || !!pasteError}
className="w-full btn btn-primary flex items-center justify-center gap-2 disabled:opacity-50"
>
{connecting
@@ -286,32 +524,12 @@ export default function CellNetwork() {
) : (
<div className="space-y-3">
{connections.map(conn => (
<div key={conn.cell_name}
className="flex items-center justify-between p-3 bg-gray-50 rounded-lg border border-gray-100">
<div className="flex items-center gap-3 min-w-0">
<StatusDot online={conn.online} />
<div className="min-w-0">
<div className="flex items-center gap-1.5 min-w-0">
<span className="font-medium text-gray-900 truncate" title={conn.cell_name}>{conn.cell_name}</span>
<span className="text-xs text-gray-400 font-mono shrink-0">.{conn.domain}</span>
</div>
<div className="text-xs text-gray-500 space-x-3 mt-0.5 truncate">
<span>Subnet: <span className="font-mono">{conn.vpn_subnet}</span></span>
<span>Endpoint: <span className="font-mono">{conn.endpoint || '—'}</span></span>
{conn.last_handshake && (
<span>Last seen: {new Date(conn.last_handshake * 1000).toLocaleString()}</span>
)}
</div>
</div>
</div>
<button
onClick={() => handleDisconnect(conn.cell_name)}
className="text-red-400 hover:text-red-600 p-1.5 rounded hover:bg-red-50"
title="Disconnect"
>
<Unplug className="h-4 w-4" />
</button>
</div>
<CellPanel
key={conn.cell_name}
conn={conn}
onDisconnect={handleDisconnect}
addToast={addToast}
/>
))}
</div>
)}
+27 -1
View File
@@ -23,6 +23,32 @@ function WireGuard() {
useEffect(() => {
  fetchWireGuardData();
  const interval = setInterval(() => {
    // Lightweight status-only refresh every 30s (full fetchWireGuardData is
    // only run once on mount; this keeps _liveStatus current without
    // re-fetching config).
    fetch('/api/wireguard/peers/statuses', { credentials: 'include' })
      .then(r => (r.ok ? r.json() : {}))
      .then(liveStatuses => {
        // Normalize the response into a public_key -> status map.
        // The endpoint's other consumer reads `data.peers` (an array of
        // entries carrying `public_key`), so handle that shape; also
        // accept a bare array or an already-keyed map, defensively.
        let byKey = {};
        const fill = arr => arr.forEach(p => {
          if (p && p.public_key) byKey[p.public_key] = p;
        });
        if (Array.isArray(liveStatuses)) {
          fill(liveStatuses);
        } else if (Array.isArray(liveStatuses.peers)) {
          fill(liveStatuses.peers);
        } else {
          byKey = liveStatuses; // assume map keyed by public_key
        }
        setPeers(prev => prev.map(peer => {
          const raw = byKey[peer.public_key] || { online: null };
          const st = {
            online: raw.online ?? null,
            lastHandshake: raw.last_handshake || raw.lastHandshake || null,
            lastHandshakeSecondsAgo: raw.last_handshake_seconds_ago ?? null,
            transferRx: raw.transfer_rx ?? raw.transferRx ?? 0,
            transferTx: raw.transfer_tx ?? raw.transferTx ?? 0,
            endpoint: raw.endpoint || null,
          };
          return { ...peer, _liveStatus: st };
        }));
        // NOTE(review): a previous no-op setPeerStatuses(prev => ({...prev}))
        // was removed here — it returned prev unchanged and only forced
        // a re-render.
      })
      .catch(() => {}); // best-effort poll; silently skip failed refreshes
  }, 30000);
  return () => clearInterval(interval);
}, []);
const refreshExternalIp = async () => {
@@ -466,7 +492,7 @@ PersistentKeepalive = ${peer.persistent_keepalive || 25}`;
<h3 className="text-lg font-medium text-gray-900">Live Connected Peers</h3>
</div>
<div className="text-sm text-gray-500">
{peers.length} peer{peers.length !== 1 ? 's' : ''} currently connected
{peers.filter(p => p._liveStatus?.online).length} / {peers.length} peer{peers.length !== 1 ? 's' : ''} online
</div>
</div>
+4
View File
@@ -278,6 +278,10 @@ export const cellLinkAPI = {
  // Establish a new cell-to-cell link from a pasted invite payload.
  addConnection: (invite) => api.post('/api/cells', invite),
  // Tear down the link to the named remote cell.
  removeConnection: (name) => api.delete(`/api/cells/${name}`),
  // Live status (online / last_handshake) for one linked cell.
  getStatus: (name) => api.get(`/api/cells/${name}/status`),
  // Service-sharing permissions for one linked cell.
  getPermissions: (cellName) => api.get(`/api/cells/${cellName}/permissions`),
  // Replace both permission directions; `inbound`/`outbound` are
  // service-name -> bool maps (see the backend's VALID_SERVICES).
  updatePermissions: (cellName, inbound, outbound) =>
    api.put(`/api/cells/${cellName}/permissions`, { inbound, outbound }),
  // List the services this cell can share.
  getServices: () => api.get('/api/cells/services'),
};
// Health check