580d8af7ae
Root cause: write_env_file used os.replace() which creates a new inode. Docker file bind-mounts track the original inode at mount time, so the container's /app/.env.compose never saw updates — docker compose always read the stale port value and skipped container recreation. Fixes: - ip_utils.write_env_file: write in-place (open 'w') instead of os.replace() so Docker bind-mounted files see the update immediately - apply_pending_config: add --force-recreate to docker compose up for specific-container restarts, bypassing config-hash comparison as a belt-and-suspenders measure Tests added: - TestWriteEnvFileInPlace: verifies inode is preserved across writes - TestApplyPendingConfigForceRecreate: verifies --force-recreate is in the docker compose command for specific-container restarts Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
3148 lines
127 KiB
Python
3148 lines
127 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Personal Internet Cell API Server
|
|
|
|
Provides REST API endpoints for managing:
|
|
- Cell status and configuration
|
|
- Network services (DNS, DHCP, NTP)
|
|
- WireGuard VPN and peer management
|
|
- Email, Calendar, and File services
|
|
- Routing and VPN gateway
|
|
- Vault and trust management (Phase 6)
|
|
"""
|
|
|
|
import os
|
|
import io
|
|
import json
|
|
import zipfile
|
|
import shutil
|
|
import logging
|
|
from datetime import datetime
|
|
from flask import Flask, request, jsonify, current_app, send_file, session
|
|
from flask_cors import CORS
|
|
import threading
|
|
import time
|
|
from collections import deque
|
|
import json as pyjson
|
|
from logging.handlers import RotatingFileHandler
|
|
import uuid
|
|
import contextvars
|
|
|
|
# Track API start time for uptime calculation
|
|
API_START_TIME = time.time()
|
|
|
|
from network_manager import NetworkManager
|
|
from wireguard_manager import WireGuardManager
|
|
from peer_registry import PeerRegistry
|
|
from email_manager import EmailManager
|
|
from calendar_manager import CalendarManager
|
|
from file_manager import FileManager
|
|
from routing_manager import RoutingManager
|
|
from cell_manager import CellManager
|
|
from vault_manager import VaultManager
|
|
from container_manager import ContainerManager
|
|
from config_manager import ConfigManager
|
|
from service_bus import ServiceBus, EventType
|
|
from log_manager import LogManager
|
|
from cell_link_manager import CellLinkManager
|
|
import firewall_manager
|
|
from port_registry import PORT_FIELDS, detect_conflicts
|
|
from auth_manager import AuthManager
|
|
import auth_routes
|
|
|
|
# Context variable for request info
|
|
request_context = contextvars.ContextVar('request_context', default={})
|
|
|
|
# Set default log level and log file if not already defined
|
|
LOG_LEVEL = globals().get('LOG_LEVEL', 'INFO')
|
|
LOG_FILE = globals().get('LOG_FILE', 'picell.log')
|
|
|
|
class ContextFilter(logging.Filter):
    """Inject the current request context into every log record.

    Each key/value pair stored in the ``request_context`` context variable
    (request_id, client_ip, method, path, user, ...) is attached to the
    record as an attribute so JsonFormatter can serialize it.
    """

    def filter(self, record):
        for key, value in request_context.get({}).items():
            setattr(record, key, value)
        # Always keep the record — this filter only enriches, never drops.
        return True
|
|
|
|
class JsonFormatter(logging.Formatter):
    """Render each log record as a single-line JSON object.

    Core fields (timestamp, level, name, message) are always considered;
    request-scoped fields injected by ContextFilter are included when set.
    Keys whose value is None are omitted from the serialized output.
    """

    # Optional per-request attributes set by ContextFilter / after_request.
    _CONTEXT_FIELDS = ('request_id', 'client_ip', 'method', 'path', 'status', 'user')

    def format(self, record):
        payload = {
            'timestamp': self.formatTime(record, self.datefmt),
            'level': record.levelname,
            'name': record.name,
            'message': record.getMessage(),
        }
        for field in self._CONTEXT_FIELDS:
            payload[field] = getattr(record, field, None)
        if record.exc_info:
            payload['exception'] = self.formatException(record.exc_info)
        # Drop None-valued keys so absent context fields don't clutter output.
        return pyjson.dumps({key: val for key, val in payload.items() if val is not None})
|
|
|
|
# Shared JSON formatter and context filter, attached to every handler below.
json_formatter = JsonFormatter()
context_filter = ContextFilter()

handlers = [logging.StreamHandler()]
try:
    file_handler = RotatingFileHandler(
        LOG_FILE, maxBytes=5_000_000, backupCount=5, encoding='utf-8'
    )
    file_handler.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
    handlers.append(file_handler)
except Exception as e:
    # Best-effort: file logging is optional (e.g. read-only filesystem);
    # console logging still works without it.
    print(f"Warning: Could not create rotating log file handler: {e}")

# Attach formatter + filter to all handlers in one place.  (Previously the
# file handler had these applied twice — once at creation and again here —
# which was redundant.)
for h in handlers:
    h.setFormatter(json_formatter)
    h.addFilter(context_filter)

logging.basicConfig(
    level=getattr(logging, LOG_LEVEL, logging.INFO),
    handlers=handlers
)
logger = logging.getLogger('picell')
|
|
|
|
# Flask app setup
app = Flask(__name__)
CORS(app)

# Development mode flag
# NOTE(review): hard-coded — should be driven by an environment variable
# before a production deployment.
app.config['DEVELOPMENT_MODE'] = True  # Set to True for development, False for production
# NOTE(review): without a SECRET_KEY env var a fresh random key is generated
# on every process start, which invalidates all existing sessions on restart.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', os.urandom(32))

# Initialize enhanced components
config_manager = ConfigManager(
    config_file=os.path.join(os.environ.get('CONFIG_DIR', '/app/config'), 'cell_config.json'),
    data_dir=os.environ.get('DATA_DIR', '/app/data'),
)
service_bus = ServiceBus()
# NOTE(review): relative path here, while other components honour the
# DATA_DIR env var — confirm this divergence is intentional.
log_manager = LogManager(log_dir='./data/logs')
|
|
|
|
# Initialize service loggers — one JSON logger per subsystem; only the 'api'
# logger also mirrors to the console.
service_log_configs = {
    'network': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'wireguard': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'email': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'calendar': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'files': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'routing': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'vault': {'level': 'INFO', 'formatter': 'json', 'console': False},
    'api': {'level': 'INFO', 'formatter': 'json', 'console': True}
}

for service, config in service_log_configs.items():
    log_manager.add_service_logger(service, config)

# Apply any persisted log level overrides.  Best-effort: a missing or corrupt
# overrides file must not block API startup — but the failure is now logged
# instead of silently swallowed.
_levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json')
if os.path.exists(_levels_file):
    try:
        with open(_levels_file) as _f:
            for _svc, _lvl in json.load(_f).items():
                log_manager.set_service_level(_svc, _lvl)
    except Exception as _e:
        logger.warning(f"Could not apply persisted log level overrides: {_e}")

# Start service bus
service_bus.start()
|
|
|
|
@app.before_request
def enrich_log_context():
    """Populate the request context variable consumed by the logging filter."""
    # request.user is only present when some auth layer sets it; fall back
    # to 'anonymous' otherwise.
    user_obj = getattr(request, 'user', None)
    request_context.set({
        'request_id': str(uuid.uuid4()),
        'client_ip': request.remote_addr,
        'method': request.method,
        'path': request.path,
        'user': getattr(user_obj, 'id', None) or 'anonymous',
    })
|
|
|
|
@app.before_request
def enforce_auth():
    """Enforce session-based authentication and role-based access control.

    Rules:
    - /api/auth/* is always public (login, logout, me, change-password)
    - Non-/api/ paths (e.g. /health) are always public
    - /api/peer/* is accessible to peer role only (admin gets 403)
    - All other /api/* routes require admin role

    Enforcement is active when auth_manager is a real AuthManager instance
    with at least one registered user. Tests that do not seed the auth
    store will see an empty user list and bypass enforcement, preserving
    backward-compatibility with pre-auth test suites.

    Returns None to let the request proceed, or a (json, status) pair to
    short-circuit it with 401/403.
    """
    path = request.path
    # Always allow non-API paths and auth namespace
    if not path.startswith('/api/') or path.startswith('/api/auth/'):
        return None
    # Only enforce when auth_manager has been properly initialised and seeded
    try:
        from auth_manager import AuthManager as _AuthManager
        if not isinstance(auth_manager, _AuthManager):
            return None
        users = auth_manager.list_users()
        if not users:
            return None
    except Exception:
        # Deliberately fail open: an uninitialised/broken auth store disables
        # enforcement rather than locking out the whole API (see docstring).
        return None
    username = session.get('username')
    if not username:
        return jsonify({'error': 'Not authenticated'}), 401
    role = session.get('role')
    if path.startswith('/api/peer/'):
        # Peer namespace is exclusive to the peer role — admins get 403 too.
        if role != 'peer':
            return jsonify({'error': 'Forbidden'}), 403
    else:
        # Everything else under /api/ is admin-only.
        if role != 'admin':
            return jsonify({'error': 'Forbidden'}), 403
    return None
|
|
|
|
|
|
@app.after_request
def log_request(response):
    """Record the response status on the log context and emit one access-log line."""
    # Mutating the dict held by the context variable makes the status visible
    # to any log records emitted later in this request.
    context = request_context.get({})
    context['status'] = response.status_code
    method, path, status = context.get('method'), context.get('path'), context.get('status')
    logger.info(f"{method} {path} {status}")
    return response
|
|
|
|
@app.teardown_request
def clear_log_context(exc):
    """Reset the per-request log context once the request has finished."""
    request_context.set({})
|
|
|
|
# Initialize managers — paths configurable via env for testing
_DATA_DIR = os.environ.get('DATA_DIR', '/app/data')
_CONFIG_DIR = os.environ.get('CONFIG_DIR', '/app/config')

network_manager = NetworkManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
wireguard_manager = WireGuardManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
peer_registry = PeerRegistry(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
email_manager = EmailManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
calendar_manager = CalendarManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
file_manager = FileManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
routing_manager = RoutingManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
# Stored on the app object (rather than a module global) so handlers can
# reach it through current_app as well.
app.vault_manager = VaultManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
container_manager = ContainerManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
# Cell links need direct access to the WireGuard and network managers.
cell_link_manager = CellLinkManager(
    data_dir=_DATA_DIR, config_dir=_CONFIG_DIR,
    wireguard_manager=wireguard_manager, network_manager=network_manager,
)
auth_manager = AuthManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR)
# Share the same AuthManager instance with the auth blueprint module.
auth_routes.auth_manager = auth_manager
|
|
|
|
# Apply firewall + DNS rules from stored peer settings (survives API restarts)
|
|
def _configured_domain() -> str:
    """Return the cell domain from the stored identity config, defaulting to 'cell'."""
    identity = config_manager.configs.get('_identity', {})
    return identity.get('domain', 'cell')
|
|
|
|
|
|
def _apply_startup_enforcement():
    """Re-apply per-peer firewall and DNS rules after an API restart.

    Runs in a background thread (see below); any failure is logged as a
    warning rather than propagated, so enforcement problems never block
    startup.
    """
    try:
        peers = peer_registry.list_peers()
        firewall_manager.apply_all_peer_rules(peers)
        firewall_manager.apply_all_dns_rules(peers, COREFILE_PATH, _configured_domain())
        logger.info(f"Applied enforcement rules for {len(peers)} peers on startup")
    except Exception as e:
        logger.warning(f"Startup enforcement failed (non-fatal): {e}")
|
|
|
|
def _bootstrap_dns():
    """Seed DNS zone records from stored identity, falling back to env defaults.

    Best-effort background task: failures are logged as warnings and never
    block startup.
    """
    try:
        identity = config_manager.configs.get('_identity', {})
        cell_name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        domain = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        ip_range = identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
        network_manager.bootstrap_dns_records(cell_name, domain, ip_range)
    except Exception as e:
        logger.warning(f"DNS bootstrap failed (non-fatal): {e}")
|
|
|
|
# CoreDNS Corefile path used when applying per-peer DNS rules.  Defined
# before the enforcement thread starts, so the thread always sees it.
COREFILE_PATH = '/app/config/dns/Corefile'

# Run in background so startup isn't blocked waiting on docker exec
threading.Thread(target=_apply_startup_enforcement, daemon=True).start()
threading.Thread(target=_bootstrap_dns, daemon=True).start()
|
|
|
|
# Register services with service bus.  These names are what
# service_bus.list_services() iterates in the status/health endpoints.
service_bus.register_service('network', network_manager)
service_bus.register_service('wireguard', wireguard_manager)
service_bus.register_service('email', email_manager)
service_bus.register_service('calendar', calendar_manager)
service_bus.register_service('files', file_manager)
service_bus.register_service('routing', routing_manager)
service_bus.register_service('vault', app.vault_manager)
service_bus.register_service('container', container_manager)
# NOTE(review): peer_registry, cell_link_manager and auth_manager are not
# registered on the bus — confirm they are meant to be excluded from the
# unified status/health reporting.

# Register auth blueprint
app.register_blueprint(auth_routes.auth_bp)
|
|
|
|
# Unified health monitoring
HEALTH_HISTORY_SIZE = 100
# Most-recent-first ring buffer of health check results (appendleft in
# health_monitor_loop); bounded so memory stays constant.
health_history = deque(maxlen=HEALTH_HISTORY_SIZE)
# Loop condition for the monitor thread; not toggled anywhere in the code
# visible here, so the loop effectively runs for the process lifetime.
health_monitor_running = True

# Health alerting configuration
HEALTH_ALERT_THRESHOLD = 3  # Number of consecutive failures before alert
# Per-service count of consecutive failed checks; reset to 0 on recovery.
service_alert_counters = {}
|
|
|
|
def perform_health_check():
    """Perform a unified health check of all services, with alerting.

    Returns a dict keyed by service name (plus 'timestamp' and 'alerts').
    A service is flagged unhealthy only when the evidence is strong (its
    container is reported not running, or the status call errored); after
    HEALTH_ALERT_THRESHOLD consecutive failures an alert is logged, added
    to the result, and published on the service bus.
    """
    try:
        # Use service bus to get health from all services
        # NOTE(review): utcnow() produces a naive timestamp (no tz suffix) and
        # is deprecated in newer Pythons — consumers appear to rely on this
        # format; confirm before changing.
        result = {
            'timestamp': datetime.utcnow().isoformat(),
            'alerts': []
        }

        # Get health from each service; prefer a dedicated health_check()
        # method, fall back to get_status().
        for service_name in service_bus.list_services():
            try:
                service = service_bus.get_service(service_name)
                if hasattr(service, 'health_check'):
                    health = service.health_check()
                else:
                    health = service.get_status()
                result[service_name] = health
            except Exception as e:
                result[service_name] = {'error': str(e), 'status': 'offline'}

        # Health alerting logic — alert only when a service container is not running
        global service_alert_counters
        for service_name in service_bus.list_services():
            if service_name in result:
                status = result[service_name]
                healthy = True  # default to healthy unless proven otherwise

                if isinstance(status, dict):
                    # Prefer status.running (container actually up) over healthy (connectivity tests)
                    inner = status.get('status', {})
                    if isinstance(inner, dict):
                        if 'running' in inner:
                            healthy = inner['running']
                        elif 'status' in inner:
                            healthy = str(inner['status']).lower() in ('ok', 'healthy', 'online', 'active')
                    elif 'running' in status:
                        healthy = status['running']
                    elif 'error' in status:
                        healthy = False
                else:
                    healthy = bool(status)

                # Only count as unhealthy if we're certain it's down
                if not healthy:
                    service_alert_counters[service_name] = service_alert_counters.get(service_name, 0) + 1
                    if service_alert_counters[service_name] >= HEALTH_ALERT_THRESHOLD:
                        alert_msg = f"ALERT: {service_name} unhealthy for {service_alert_counters[service_name]} consecutive checks."
                        logger.warning(alert_msg)
                        result['alerts'].append(alert_msg)

                        # Publish alert event
                        service_bus.publish_event(EventType.ERROR_OCCURRED, service_name, {
                            'error': alert_msg,
                            'service': service_name,
                            'consecutive_failures': service_alert_counters[service_name]
                        })
                else:
                    # Reset counter if service is healthy
                    if service_alert_counters.get(service_name, 0) > 0:
                        logger.info(f"Service {service_name} recovered, resetting alert counter")
                        service_alert_counters[service_name] = 0

        logger.info(f"Unified health check: {result}")
        return result
    except Exception as e:
        logger.error(f"Unified health check failed: {e}")
        return {'error': str(e), 'timestamp': datetime.utcnow().isoformat()}
|
|
|
|
def health_monitor_loop():
    """Background loop: run a health check every 60s and record/publish it.

    Runs inside an app context because the check may touch current_app-bound
    state.  (Indentation reconstructed from mangled source — the sleep is
    placed outside the app context but inside the loop; confirm against the
    original file.)
    """
    while health_monitor_running:
        with app.app_context():
            health_result = perform_health_check()
            health_history.appendleft(health_result)

            # Publish health check event
            service_bus.publish_event(EventType.HEALTH_CHECK, 'api', health_result)
        time.sleep(60)  # Check every 60 seconds


# Start health monitor thread (daemon: must not keep the process alive)
health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True)
health_monitor_thread.start()
|
|
|
|
def _local_subnets():
    """Return all subnets the container is directly connected to (from routing table)."""
    import ipaddress
    import socket
    import struct

    subnets = []
    try:
        with open('/proc/net/route') as route_file:
            rows = route_file.readlines()[1:]  # first line is the header
            for row in rows:
                fields = row.strip().split()
                if len(fields) < 8 or fields[0] == 'lo':
                    continue
                # Destination (col 1) and mask (col 7) are little-endian hex
                # words in /proc/net/route.
                dest = socket.inet_ntoa(struct.pack('<I', int(fields[1], 16)))
                mask = socket.inet_ntoa(struct.pack('<I', int(fields[7], 16)))
                if dest == '0.0.0.0':
                    # Default route — not a directly-attached subnet.
                    continue
                subnets.append(ipaddress.ip_network(f'{dest}/{mask}', strict=False))
    except Exception:
        # Non-Linux host or unreadable routing table → report no subnets.
        pass
    return subnets
|
|
|
|
|
|
def is_local_request():
    """Return True when the request originates from a trusted local/private network.

    Trust sources, in order: the direct TCP peer address, then the LAST entry
    of X-Forwarded-For (appended by the trusted reverse proxy).
    """
    # Trust the direct TCP peer (request.remote_addr) first — it is always
    # the container or process making the connection and cannot be spoofed.
    # In production Flask is behind Caddy inside Docker, so remote_addr is
    # always Caddy's Docker IP (RFC-1918) and this check is sufficient.
    #
    # Additionally, when a trusted reverse-proxy (Caddy) is in the path, it
    # appends the real client IP as the LAST entry of X-Forwarded-For.
    # Trusting only the LAST XFF entry (not the first, which a client could
    # set to anything) is safe: a spoofed first entry such as
    # "XFF: 127.0.0.1, <real-ip>" still passes because the last entry is the
    # real IP appended by Caddy. An attacker directly hitting Flask on :3000
    # could craft any XFF they like, but in the Docker topology port 3000 is
    # not exposed to the internet.
    remote_addr = request.remote_addr

    def _allowed(addr):
        # Decide whether a single address string counts as "local".
        if not addr:
            return False
        if addr in ('127.0.0.1', '::1', 'localhost'):
            return True
        try:
            import ipaddress as _ipa
            ip = _ipa.ip_address(addr.strip())
            if ip.is_loopback:
                return True
            # RFC-1918 private ranges
            for _rfc in ('10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'):
                if ip in _ipa.ip_network(_rfc):
                    return True
            # Any subnet the container is directly attached to (handles non-RFC-1918
            # Docker bridge networks such as 172.0.0.0/24).
            for _net in _local_subnets():
                if ip in _net:
                    return True
            # Configured cell ip_range (WireGuard peer subnet)
            _cell = config_manager.configs.get('_identity', {}).get(
                'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
            if ip in _ipa.ip_network(_cell, strict=False):
                return True
        except Exception:
            # Unparseable address → treat as untrusted.
            pass
        return False

    if _allowed(remote_addr):
        return True

    # Check the last X-Forwarded-For entry (appended by the trusted proxy).
    # Never trust any entry other than the last one.
    try:
        xff = request.headers.get('X-Forwarded-For', '')
        if xff:
            last_ip = xff.split(',')[-1].strip()
            if last_ip and _allowed(last_ip):
                return True
    except Exception:
        pass

    return False
|
|
|
|
@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint."""
    try:
        payload = {
            "status": "healthy",
            "timestamp": datetime.utcnow().isoformat(),
            "version": "1.0.0",
        }
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Health check failed: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/status', methods=['GET'])
def get_cell_status():
    """Get overall cell status."""
    try:
        # Gather per-service status via the service bus; a failing service
        # produces an error entry instead of failing the whole endpoint.
        services_status = {}
        for name in service_bus.list_services():
            try:
                services_status[name] = service_bus.get_service(name).get_status()
            except Exception as e:
                services_status[name] = {'error': str(e)}

        peers = peer_registry.list_peers()

        # Calculate actual uptime
        uptime_seconds = int(time.time() - API_START_TIME)

        identity = config_manager.configs.get('_identity', {})
        return jsonify({
            "cell_name": identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            "domain": identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
            "uptime": uptime_seconds,
            "peers_count": len(peers),
            "services": services_status,
            "timestamp": datetime.utcnow().isoformat()
        })
    except Exception as e:
        logger.error(f"Error getting cell status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config', methods=['GET'])
def get_config():
    """Get cell configuration."""
    try:
        service_configs = config_manager.get_all_configs()
        # Identity is stored alongside service configs; split it out.
        identity = service_configs.pop('_identity', {})

        config = {
            'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
            'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
            'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
        }

        # Expose computed per-service IPs so the frontend doesn't need to derive them
        import ip_utils as _ip_utils_cfg
        all_ips = _ip_utils_cfg.get_service_ips(config['ip_range'])
        config['service_ips'] = {
            key: all_ips[key]
            for key in ('dns', 'vip_mail', 'vip_calendar', 'vip_files', 'vip_webdav')
        }
        config['service_configs'] = service_configs
        return jsonify(config)
    except Exception as e:
        logger.error(f"Error getting config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config', methods=['PUT'])
def update_config():
    """Update cell configuration.

    Accepts a JSON body with any mix of identity fields (cell_name, domain,
    ip_range, wireguard_port) and per-service config sections.  Flow:
    validate → snapshot pre-change state → persist → apply to live services →
    detect identity/port changes and queue pending container restarts via
    _set_pending_restart (defined elsewhere in this file).  Returns the list
    of restarted services and accumulated warnings, or 400/409/500 on error.

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the nesting of the wireguard-sync block inside the service loop is the
    most plausible reading — confirm against the original file.
    """
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400

        # Handle identity fields (cell_name, domain, ip_range, wireguard_port)
        identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
        identity_updates = {k: v for k, v in data.items() if k in identity_keys}

        # Validate cell_name — must be non-empty and at most 255 characters (DNS limit)
        if 'cell_name' in identity_updates:
            v = str(identity_updates['cell_name'])
            if len(v) > 255:
                return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
            if not v:
                return jsonify({'error': 'cell_name cannot be empty'}), 400

        # Validate domain — must be non-empty and at most 255 characters (DNS limit)
        if 'domain' in identity_updates:
            v = str(identity_updates['domain'])
            if len(v) > 255:
                return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
            if not v:
                return jsonify({'error': 'domain cannot be empty'}), 400

        # Validate ip_range — must be a valid CIDR within an RFC-1918 range
        if 'ip_range' in identity_updates:
            import ipaddress as _ipa
            _rfc1918 = [
                _ipa.ip_network('10.0.0.0/8'),
                _ipa.ip_network('172.16.0.0/12'),
                _ipa.ip_network('192.168.0.0/16'),
            ]
            try:
                _raw = str(identity_updates['ip_range'])
                if '/' not in _raw:
                    return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
                _net = _ipa.ip_network(_raw, strict=False)
                if not any(_net.subnet_of(r) for r in _rfc1918):
                    return jsonify({'error': (
                        'ip_range must be within an RFC-1918 private range '
                        '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
                    )}), 400
            except ValueError as _e:
                return jsonify({'error': f'Invalid ip_range: {_e}'}), 400

        # Validate service config port and IP fields
        _port_fields = {
            'network': ['dns_port'],
            'wireguard': ['port'],
            'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
            'calendar': ['port'],
            'files': ['port', 'manager_port'],
        }
        for _svc, _fields in _port_fields.items():
            if _svc not in data:
                continue
            _svc_data = data[_svc]
            if not isinstance(_svc_data, dict):
                continue
            for _f in _fields:
                # Empty string / None mean "not set" and are skipped.
                if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
                    try:
                        _p = int(_svc_data[_f])
                        if not (1 <= _p <= 65535):
                            raise ValueError()
                    except (ValueError, TypeError):
                        return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400
        # Validate that no two service sections use the same port number
        _conflicts = detect_conflicts(config_manager.configs, data)
        if _conflicts:
            _msgs = []
            for _c in _conflicts:
                _pairs = ', '.join(f"{_s}.{_f}" for _s, _f in _c['conflicts'])
                _msgs.append(f"port {_c['port']} is used by {_pairs}")
            return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409
        # Validate WireGuard address (must be valid IP/CIDR)
        if 'wireguard' in data and isinstance(data['wireguard'], dict):
            _addr = data['wireguard'].get('address')
            if _addr:
                import ipaddress as _ipa2
                if '/' not in str(_addr):
                    return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
                try:
                    _ipa2.ip_interface(_addr)
                except ValueError as _e:
                    return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400

        # Capture old identity and service configs BEFORE saving, for change detection + revert
        import copy as _copy
        old_identity = dict(config_manager.configs.get('_identity', {}))
        old_svc_configs = {
            svc: dict(config_manager.configs.get(svc, {}))
            for svc in data if svc in config_manager.service_schemas
        }
        # Full pre-change snapshot — used by Discard to revert to original state.
        # Must be captured here, before any config writes, so it holds the true old values.
        _pre_change_snapshot = {k: _copy.deepcopy(v) for k, v in config_manager.configs.items()
                                if not k.startswith('_')}
        _pre_change_snapshot['_identity'] = _copy.deepcopy(config_manager.configs.get('_identity', {}))
        if identity_updates:
            stored = config_manager.configs.get('_identity', {})
            stored.update(identity_updates)
            config_manager.configs['_identity'] = stored
            config_manager._save_all_configs()

        # Map service names to their manager instances
        _svc_managers = {
            'network': network_manager,
            'wireguard': wireguard_manager,
            'email': email_manager,
            'calendar': calendar_manager,
            'files': file_manager,
            'routing': routing_manager,
            'vault': app.vault_manager,
        }

        all_restarted = []
        all_warnings = []

        # Update service configurations: persist + apply to real config files
        for service, config in data.items():
            if service in config_manager.service_schemas:
                config_manager.update_service_config(service, config)
                mgr = _svc_managers.get(service)
                if mgr:
                    mgr.update_config(config)
                    result = mgr.apply_config(config)
                    all_restarted.extend(result.get('restarted', []))
                    all_warnings.extend(result.get('warnings', []))
                service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
                    'service': service,
                    'config': config
                })
                # VPN port or subnet change → all peer client configs are stale
                if service == 'wireguard' and ('port' in config or 'address' in config):
                    for p in peer_registry.list_peers():
                        peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
                    n = len(peer_registry.list_peers())
                    if n:
                        all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
                    # Keep identity.wireguard_port in sync with service config port
                    if 'port' in config:
                        _id = config_manager.configs.get('_identity', {})
                        _id['wireguard_port'] = config['port']
                        config_manager.configs['_identity'] = _id
                        config_manager._save_all_configs()

        # Apply cell identity domain to network and email services (write files, defer reload)
        if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
            domain = identity_updates['domain']
            net_result = network_manager.apply_domain(domain, reload=False)
            all_warnings.extend(net_result.get('warnings', []))
            # Regenerate Caddyfile — virtual host names change with the domain
            import ip_utils as _ip_domain
            _cur_id = config_manager.configs.get('_identity', {})
            _cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
            _cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            _ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config/caddy/Caddyfile')
            _set_pending_restart(
                [f'domain changed to {domain}'],
                ['dns', 'caddy'],
                pre_change_snapshot=_pre_change_snapshot,
            )

        # Apply cell name change to DNS hostname record (write files, defer reload)
        if identity_updates.get('cell_name'):
            old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            new_name = identity_updates['cell_name']
            if old_name != new_name:
                cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
                all_warnings.extend(cn_result.get('warnings', []))
                # Regenerate Caddyfile — main virtual host name changes with cell_name
                import ip_utils as _ip_name
                _cur_id2 = config_manager.configs.get('_identity', {})
                _cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
                _cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
                _ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config/caddy/Caddyfile')
                _set_pending_restart(
                    [f'cell_name changed to {new_name}'],
                    ['dns'],
                    pre_change_snapshot=_pre_change_snapshot,
                )

        # Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules
        if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
            import ip_utils
            new_range = identity_updates['ip_range']
            cur_identity = config_manager.configs.get('_identity', {})
            cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
            # Update DNS zone records immediately
            ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
            all_restarted.extend(ip_result.get('restarted', []))
            all_warnings.extend(ip_result.get('warnings', []))
            # Update firewall virtual IPs (iptables) and Caddy virtual IPs immediately
            firewall_manager.update_service_ips(new_range)
            firewall_manager.ensure_caddy_virtual_ips()
            # Write new .env with updated IPs (and current ports) for next container start
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
            # Regenerate Caddyfile with new VIPs
            ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain,
                                     '/app/config/caddy/Caddyfile')
            # Mark ALL containers as needing restart; network_recreate signals that
            # docker compose down is required before up (Docker can't change subnet in-place)
            _set_pending_restart(
                [f'ip_range changed to {new_range} — network will be recreated'],
                ['*'], network_recreate=True,
                pre_change_snapshot=_pre_change_snapshot,
            )

        # Detect port changes across service configs and identity
        # Maps (service_key, field_name) → (port_env_key, [containers])
        _PORT_CHANGE_MAP = {
            ('network', 'dns_port'): ('dns_port', ['dns']),
            ('wireguard','port'): ('wg_port', ['wireguard']),
            ('email', 'smtp_port'): ('mail_smtp_port', ['mail']),
            ('email', 'submission_port'): ('mail_submission_port', ['mail']),
            ('email', 'imap_port'): ('mail_imap_port', ['mail']),
            ('email', 'webmail_port'): ('rainloop_port', ['rainloop']),
            ('calendar', 'port'): ('radicale_port', ['radicale']),
            ('files', 'port'): ('webdav_port', ['webdav']),
            ('files', 'manager_port'): ('filegator_port', ['filegator']),
        }

        port_changed_containers = set()
        port_change_messages = []

        import ip_utils as _ip_utils_pcd
        for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
            if svc_key in data and field in data[svc_key]:
                # Compare against the pre-save value (or the documented default).
                default_val = _ip_utils_pcd.PORT_DEFAULTS.get(_env_key)
                old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
                new_val = data[svc_key][field]
                if old_val != new_val:
                    port_changed_containers.update(containers)
                    port_change_messages.append(
                        f'{svc_key} {field}: {old_val} → {new_val}'
                    )

        # wireguard_port in identity also drives WG_PORT env var; sync to service config
        if 'wireguard_port' in identity_updates:
            old_wg = old_identity.get('wireguard_port', _ip_utils_pcd.PORT_DEFAULTS.get('wg_port', 51820))
            new_wg = identity_updates['wireguard_port']
            if old_wg != new_wg:
                # Sync to wireguard service config and update wg0.conf
                _wg_svc = config_manager.configs.get('wireguard', {})
                _wg_svc['port'] = new_wg
                config_manager.update_service_config('wireguard', _wg_svc)
                wireguard_manager.apply_config({'port': new_wg})
                port_changed_containers.add('wireguard')
                port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}')

        if port_changed_containers:
            import ip_utils as _ip_utils_ports
            _env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            _ip_range = config_manager.configs.get('_identity', {}).get(
                'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
            )
            # Regenerate the compose .env so restarted containers see new ports.
            _ip_utils_ports.write_env_file(
                _ip_range, _env_file, _collect_service_ports(config_manager.configs)
            )
            _set_pending_restart(port_change_messages, list(port_changed_containers),
                                 pre_change_snapshot=_pre_change_snapshot)

        logger.info(f"Updated config, restarted: {all_restarted}")
        return jsonify({
            "message": "Configuration updated and applied",
            "restarted": all_restarted,
            "warnings": all_warnings,
        })
    except Exception as e:
        logger.error(f"Error updating config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Pending-restart helpers
|
|
# ---------------------------------------------------------------------------
|
|
|
|
def _collect_service_ports(configs: dict) -> dict:
|
|
"""Extract current port values from service configs for .env generation."""
|
|
ports = {}
|
|
net = configs.get('network', {})
|
|
wg = configs.get('wireguard', {})
|
|
email = configs.get('email', {})
|
|
cal = configs.get('calendar', {})
|
|
files = configs.get('files', {})
|
|
identity = configs.get('_identity', {})
|
|
|
|
if 'dns_port' in net: ports['dns_port'] = net['dns_port']
|
|
if 'port' in wg: ports['wg_port'] = wg['port']
|
|
elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port']
|
|
if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port']
|
|
if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port']
|
|
if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port']
|
|
if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port']
|
|
if 'port' in cal: ports['radicale_port'] = cal['port']
|
|
if 'port' in files: ports['webdav_port'] = files['port']
|
|
if 'manager_port' in files: ports['filegator_port'] = files['manager_port']
|
|
return ports
|
|
|
|
|
|
def _dedup_changes(existing: list, new: list) -> list:
|
|
"""Merge change lists, keeping only the latest entry per config key."""
|
|
def key_of(msg: str) -> str:
|
|
# "ip_range changed to X" → "ip_range"
|
|
if ' changed' in msg:
|
|
return msg.split(' changed')[0].strip()
|
|
# "network dns_port: 52 → 53" → "network dns_port"
|
|
if ':' in msg:
|
|
return msg.split(':')[0].strip()
|
|
return msg
|
|
merged = {key_of(c): c for c in existing}
|
|
merged.update({key_of(c): c for c in new})
|
|
return list(merged.values())
|
|
|
|
|
|
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
                         pre_change_snapshot: dict = None):
    """Record that specific containers need to be restarted to apply configuration.

    containers: list of docker-compose service names, or None/'*' to restart all.
    network_recreate: True when the Docker bridge subnet changed (requires down+up).
    pre_change_snapshot: full config captured BEFORE this save (for Discard to revert).
    Merges with any existing pending state so multiple changes accumulate.
    """
    from datetime import datetime as _dt
    existing = config_manager.configs.get('_pending_restart', {})
    # Only inherit prior pending entries when the flag is actually set;
    # a cleared entry (needs_restart False) contributes nothing.
    existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
    existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []

    # Keep the oldest snapshot (the true pre-change state). Never overwrite it with a
    # later snapshot — subsequent changes while pending should still revert to origin.
    if not existing.get('needs_restart'):
        snapshot = pre_change_snapshot or {}
    else:
        snapshot = existing.get('_snapshot', {})

    # '*' (all services) is sticky: once an all-services restart is pending,
    # merging in a narrower container list must not shrink it.
    if containers is None or '*' in (containers or []) or existing_containers == ['*']:
        new_containers = ['*']
    else:
        new_containers = list(set(existing_containers) | set(containers))

    config_manager.configs['_pending_restart'] = {
        'needs_restart': True,
        # NOTE(review): naive UTC timestamp (datetime.utcnow()); consumers treat
        # it as informational only.
        'changed_at': _dt.utcnow().isoformat(),
        'changes': _dedup_changes(existing_changes, changes),
        'containers': new_containers,
        # network_recreate is also sticky across merged changes.
        'network_recreate': network_recreate or existing.get('network_recreate', False),
        '_snapshot': snapshot,
    }
    config_manager._save_all_configs()
|
|
|
|
|
|
def _clear_pending_restart():
    """Reset the pending-restart marker to its idle state and persist it."""
    cleared_state = {
        'needs_restart': False,
        'changes': [],
        'containers': [],
        'network_recreate': False,
    }
    config_manager.configs['_pending_restart'] = cleared_state
    config_manager._save_all_configs()
|
|
|
|
|
|
@app.route('/api/config/pending', methods=['GET'])
def get_pending_config():
    """Return whether there are unapplied configuration changes that require a restart."""
    state = config_manager.configs.get('_pending_restart', {})
    payload = {
        'needs_restart': state.get('needs_restart', False),
        'changed_at': state.get('changed_at'),
        'changes': state.get('changes', []),
        'containers': state.get('containers', ['*']),
    }
    return jsonify(payload)
|
|
|
|
|
|
@app.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
    """Discard pending configuration changes and restore config to pre-change snapshot.

    Reverts in-memory config to the snapshot captured before the first pending
    change, rewrites the derived DNS/Caddy config files to match, then clears
    the pending-restart marker.
    """
    pending = config_manager.configs.get('_pending_restart', {})
    snapshot = pending.get('_snapshot', {})
    if snapshot:
        # Capture current (changed) identity before reverting, to rewrite config files
        cur_identity = dict(config_manager.configs.get('_identity', {}))
        old_identity = snapshot.get('_identity', {})

        # Restore config values from snapshot
        for k, v in snapshot.items():
            config_manager.configs[k] = v

        # Rewrite DNS/Caddy config files back to old values so they match restored config
        import ip_utils as _ip_revert
        # These defaults mirror the ones used when the config was first written.
        _id = config_manager.configs.get('_identity', {})
        _range = _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
        _cell = _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))

        # Only touch the DNS files when the value actually changed while pending.
        cur_domain = cur_identity.get('domain', '')
        old_domain = old_identity.get('domain', '')
        if cur_domain and old_domain and cur_domain != old_domain:
            network_manager.apply_domain(old_domain, reload=False)

        cur_cell_name = cur_identity.get('cell_name', '')
        old_cell_name = old_identity.get('cell_name', '')
        if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
            # NOTE(review): arg order here is (current, old) while apply_domain above
            # takes only the old value — presumably apply_cell_name(from, to) renames
            # records back; confirm against NetworkManager.apply_cell_name.
            network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)

        # Regenerate the Caddyfile unconditionally from the restored identity.
        _ip_revert.write_caddyfile(_range, _cell, _dom, '/app/config/caddy/Caddyfile')

    _clear_pending_restart()
    return jsonify({'message': 'Pending changes discarded'})
|
|
|
|
|
|
@app.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
    """Apply pending configuration by restarting containers via docker compose up -d.

    Two paths: an all-services restart ('*') is delegated to a throwaway helper
    container so it survives this API container being stopped; a specific-container
    restart runs docker compose directly from a background thread.
    """
    try:
        pending = config_manager.configs.get('_pending_restart', {})
        if not pending.get('needs_restart'):
            return jsonify({'message': 'No pending changes to apply'})

        # Get project working dir and image name from our own container labels
        project_dir = '/home/roof/pic'
        api_image = 'pic_api:latest'  # fallback (docker-compose v1 naming)
        try:
            import docker as _docker_sdk
            _client = _docker_sdk.from_env()
            _self = _client.containers.get('cell-api')
            project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
            # Use the actual image tag so the helper works regardless of compose version
            # (docker-compose v1 builds pic_api:latest, compose v2+ builds pic-api:latest)
            tags = _self.image.tags
            if tags:
                api_image = tags[0]
        except Exception:
            # Best-effort: fall back to the hard-coded defaults above.
            pass

        containers = pending.get('containers', ['*'])

        # Clear pending flag before we restart so it shows cleared after new containers start
        _clear_pending_restart()

        # Check if the IP range (network subnet) is changing — Docker cannot modify an
        # existing network's subnet in-place, so we need `down` + `up` in that case.
        needs_network_recreate = pending.get('network_recreate', False)

        host_env = os.path.join(project_dir, '.env')
        host_compose = os.path.join(project_dir, 'docker-compose.yml')

        if '*' in containers:
            # All-services restart: `docker compose down` or `up -d` may stop/recreate the
            # API container itself, killing this background thread mid-operation.
            # Spawn an independent helper container (same image as cell-api) that has docker
            # CLI and survives cell-api being stopped/recreated.
            if needs_network_recreate:
                helper_script = (
                    f'sleep 2'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} down'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )
            else:
                helper_script = (
                    f'sleep 2'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )

            def _do_apply():
                # Fire-and-forget: the helper container owns the restart; we do not
                # wait for or inspect its result here.
                import subprocess as _subprocess
                _subprocess.Popen(
                    ['docker', 'run', '--rm',
                     '-v', '/var/run/docker.sock:/var/run/docker.sock',
                     '-v', f'{project_dir}:{project_dir}',
                     '--entrypoint', 'sh',
                     api_image,
                     '-c', helper_script],
                    close_fds=True,
                    stdout=_subprocess.DEVNULL,
                    stderr=_subprocess.DEVNULL,
                )
                logger.info(
                    'spawned helper container for all-services restart'
                    + (' (network_recreate)' if needs_network_recreate else '')
                )
        else:
            # Specific containers only — API is not affected, run directly from here.
            def _do_apply():
                import time as _time
                import subprocess as _subprocess
                # Small delay so the HTTP response below is sent before containers move.
                _time.sleep(0.3)
                # --force-recreate bypasses compose's config-hash comparison so the
                # containers are recreated even when compose believes nothing changed
                # (e.g. a bind-mounted env file updated in place).
                result = _subprocess.run(
                    ['docker', 'compose',
                     '--project-directory', project_dir,
                     '-f', '/app/docker-compose.yml',
                     '--env-file', '/app/.env.compose',
                     'up', '-d', '--no-deps', '--force-recreate'] + containers,
                    capture_output=True, text=True, timeout=120,
                )
                if result.returncode != 0:
                    logger.error(f"docker compose up failed: {result.stderr.strip()}")
                else:
                    logger.info(f'docker compose up completed for: {containers}')

        # daemon=False so an in-flight restart is not killed by interpreter shutdown.
        threading.Thread(target=_do_apply, daemon=False).start()

        return jsonify({
            'message': 'Applying configuration — containers are restarting',
            'restart_in_progress': True,
        })
    except Exception as e:
        logger.error(f"Error applying config: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
|
|
# Configuration management endpoints
|
|
@app.route('/api/config/backup', methods=['POST'])
def create_config_backup():
    """Create a configuration backup and announce it on the service bus."""
    try:
        new_backup_id = config_manager.backup_config()
        service_bus.publish_event(EventType.BACKUP_CREATED, 'api', {
            'backup_id': new_backup_id,
            'timestamp': datetime.utcnow().isoformat(),
        })
        return jsonify({"backup_id": new_backup_id})
    except Exception as e:
        logger.error(f"Error creating backup: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config/backups', methods=['GET'])
def list_config_backups():
    """Return the list of available configuration backups."""
    try:
        return jsonify(config_manager.list_backups())
    except Exception as e:
        logger.error(f"Error listing backups: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
    """Restore configuration from backup. Body may contain {services: [...]} for selective restore."""
    try:
        body = request.get_json(silent=True) or {}
        selected_services = body.get('services')  # None = full restore
        if not config_manager.restore_config(backup_id, services=selected_services):
            return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
        # Announce only after a successful restore.
        service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat(),
        })
        return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
    except Exception as e:
        logger.error(f"Error restoring backup: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config/export', methods=['GET'])
def export_config():
    """Export the configuration in the requested format (default json)."""
    try:
        format = request.args.get('format', 'json')
        exported = config_manager.export_config(format)
        return jsonify({"config": exported, "format": format})
    except Exception as e:
        logger.error(f"Error exporting config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config/import', methods=['POST'])
def import_config():
    """Import a configuration from the request body ({config, format})."""
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400

        imported_ok = config_manager.import_config(
            body.get('config'), body.get('format', 'json')
        )
        if not imported_ok:
            return jsonify({"error": "Failed to import configuration"}), 500
        return jsonify({"message": "Configuration imported successfully"})
    except Exception as e:
        logger.error(f"Error importing config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
    """Download a backup as a zip file.

    Streams the backup directory as an in-memory zip. Returns 404 when the id
    is unknown or malformed. backup_id is validated against the same character
    set upload_backup produces (alphanumerics and underscore) so a crafted id
    like '..' cannot make backup_dir / backup_id escape the backup directory
    and zip up arbitrary parent content.
    """
    try:
        # Reject ids outside [A-Za-z0-9_] — prevents '..' path traversal and
        # matches the sanitization applied by upload_backup.
        if not backup_id or not all(c.isalnum() or c == '_' for c in backup_id):
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        backup_path = config_manager.backup_dir / backup_id
        if not backup_path.exists():
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
            for f in backup_path.rglob('*'):
                if f.is_file():
                    # Store paths relative to the backup root inside the archive.
                    zf.write(f, f.relative_to(backup_path))
        buf.seek(0)
        return send_file(buf, mimetype='application/zip',
                         as_attachment=True,
                         download_name=f'{backup_id}.zip')
    except Exception as e:
        logger.error(f"Error downloading backup: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
    """Upload a backup zip file.

    Derives the backup id from the uploaded filename (sanitized to
    [A-Za-z0-9_]) or a timestamp, extracts the archive under backup_dir, and
    requires a manifest.json at the archive root.

    Fixes over the previous version:
    - A filename whose sanitized id is empty (e.g. "---.zip") used to make
      backup_path equal backup_dir itself, so a failed extract would
      rmtree ALL existing backups; we now fall back to a timestamp id.
    - Archive members are checked for zip-slip (absolute paths / '..')
      before extraction, since the zip content is untrusted input.
    """
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file provided'}), 400
        f = request.files['file']
        filename = f.filename or ''
        if filename.endswith('.zip'):
            backup_id = filename[:-4]
        else:
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        # Keep only safe characters so the id cannot escape backup_dir.
        backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
        if not backup_id:
            # Sanitization removed everything — never extract into backup_dir itself.
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        backup_path = config_manager.backup_dir / backup_id
        backup_path.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
                # Zip-slip guard: every member must resolve inside backup_path.
                root = os.path.realpath(backup_path)
                for member in zf.namelist():
                    target = os.path.realpath(os.path.join(root, member))
                    if target != root and not target.startswith(root + os.sep):
                        shutil.rmtree(backup_path, ignore_errors=True)
                        return jsonify({'error': 'Invalid zip file'}), 400
                zf.extractall(backup_path)
        except zipfile.BadZipFile:
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid zip file'}), 400
        if not (backup_path / 'manifest.json').exists():
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
        return jsonify({'backup_id': backup_id})
    except Exception as e:
        logger.error(f"Error uploading backup: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
    """Delete a configuration backup by id."""
    try:
        if not config_manager.delete_backup(backup_id):
            return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
        return jsonify({"message": f"Backup {backup_id} deleted"})
    except Exception as e:
        logger.error(f"Error deleting backup: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Service bus endpoints
|
|
@app.route('/api/services/bus/status', methods=['GET'])
def get_service_bus_status():
    """Return the service bus status summary."""
    try:
        summary = service_bus.get_service_status_summary()
        return jsonify(summary)
    except Exception as e:
        logger.error(f"Error getting service bus status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/services/bus/events', methods=['GET'])
def get_service_bus_events():
    """Get service bus event history.

    Query params: type (optional EventType value), source (optional), limit
    (int, default 100). Previously an unknown event type or non-numeric limit
    raised ValueError and surfaced as a 500; both are now reported as 400.
    """
    try:
        event_type = request.args.get('type')
        source = request.args.get('source')
        try:
            limit = int(request.args.get('limit', 100))
        except ValueError:
            return jsonify({"error": "limit must be an integer"}), 400

        try:
            type_filter = EventType(event_type) if event_type else None
        except ValueError:
            return jsonify({"error": f"Unknown event type: {event_type}"}), 400

        events = service_bus.get_event_history(type_filter, source, limit)

        # Convert events to serializable format
        serializable_events = [
            {
                'event_id': event.event_id,
                'event_type': event.event_type.value,
                'source': event.source,
                'data': event.data,
                'timestamp': event.timestamp.isoformat(),
            }
            for event in events
        ]

        return jsonify(serializable_events)
    except Exception as e:
        logger.error(f"Error getting service bus events: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/services/bus/services/<service_name>/start', methods=['POST'])
def start_service(service_name):
    """Start a service with orchestration."""
    try:
        if service_bus.orchestrate_service_start(service_name):
            return jsonify({"message": f"Service {service_name} started successfully"})
        return jsonify({"error": f"Failed to start service {service_name}"}), 500
    except Exception as e:
        logger.error(f"Error starting service {service_name}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/services/bus/services/<service_name>/stop', methods=['POST'])
def stop_service(service_name):
    """Stop a service with orchestration."""
    try:
        if service_bus.orchestrate_service_stop(service_name):
            return jsonify({"message": f"Service {service_name} stopped successfully"})
        return jsonify({"error": f"Failed to stop service {service_name}"}), 500
    except Exception as e:
        logger.error(f"Error stopping service {service_name}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/services/bus/services/<service_name>/restart', methods=['POST'])
def restart_service(service_name):
    """Restart a service with orchestration."""
    try:
        if service_bus.orchestrate_service_restart(service_name):
            return jsonify({"message": f"Service {service_name} restarted successfully"})
        return jsonify({"error": f"Failed to restart service {service_name}"}), 500
    except Exception as e:
        logger.error(f"Error restarting service {service_name}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Logging endpoints
|
|
@app.route('/api/logs/services/<service>', methods=['GET'])
def get_service_logs(service):
    """Return recent log lines for one service (query: level, lines)."""
    try:
        requested_level = request.args.get('level', 'INFO')
        line_count = int(request.args.get('lines', 50))
        entries = log_manager.get_service_logs(service, requested_level, line_count)
        return jsonify({"service": service, "logs": entries})
    except Exception as e:
        logger.error(f"Error getting logs for {service}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/search', methods=['POST'])
def search_logs():
    """Search logs across all services (body: query, services, level, time_range)."""
    try:
        body = request.get_json(silent=True) or {}
        matches = log_manager.search_logs(
            body.get('query', ''),
            body.get('time_range'),
            body.get('services'),
            body.get('level'),
        )
        return jsonify({"results": matches, "count": len(matches)})
    except Exception as e:
        logger.error(f"Error searching logs: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/export', methods=['POST'])
def export_logs():
    """Export logs in the requested format (body: format, filters)."""
    try:
        body = request.get_json(silent=True) or {}
        format = body.get('format', 'json')
        exported = log_manager.export_logs(format, body.get('filters', {}))
        return jsonify({"logs": exported, "format": format})
    except Exception as e:
        logger.error(f"Error exporting logs: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/statistics', methods=['GET'])
def get_log_statistics():
    """Return log statistics, optionally filtered by ?service=..."""
    try:
        return jsonify(log_manager.get_log_statistics(request.args.get('service')))
    except Exception as e:
        logger.error(f"Error getting log statistics: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/rotate', methods=['POST'])
def rotate_logs():
    """Manually rotate an API service log file (body: {service}; absent = all)."""
    try:
        body = request.get_json(silent=True) or {}
        log_manager.rotate_logs(body.get('service'))
        return jsonify({"message": "Logs rotated successfully"})
    except Exception as e:
        logger.error(f"Error rotating logs: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/files', methods=['GET'])
def get_log_file_infos():
    """List service log files with sizes."""
    try:
        infos = log_manager.get_all_log_file_infos()
        return jsonify(infos)
    except Exception as e:
        logger.error(f"Error listing log files: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/verbosity', methods=['GET'])
def get_log_verbosity():
    """Return current per-service log levels."""
    try:
        levels = log_manager.get_service_levels()
        return jsonify(levels)
    except Exception as e:
        logger.error(f"Error getting log verbosity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/logs/verbosity', methods=['PUT'])
def set_log_verbosity():
    """Update log levels for one or all services. Body: {service: level} map."""
    try:
        requested = request.get_json(silent=True) or {}
        for svc, lvl in requested.items():
            log_manager.set_service_level(svc, lvl)

        # Persist to config so levels survive API restarts
        levels_file = os.path.join(os.path.dirname(__file__), 'config', 'log_levels.json')
        os.makedirs(os.path.dirname(levels_file), exist_ok=True)
        persisted = {}
        if os.path.exists(levels_file):
            try:
                with open(levels_file) as fh:
                    persisted = json.load(fh)
            except Exception:
                pass  # unreadable/corrupt file: start from an empty map
        persisted.update(requested)
        with open(levels_file, 'w') as fh:
            json.dump(persisted, fh, indent=2)

        return jsonify({"message": "Log levels updated", "levels": log_manager.get_service_levels()})
    except Exception as e:
        logger.error(f"Error setting log verbosity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Network Services API
|
|
@app.route('/api/dns/records', methods=['GET'])
def get_dns_records():
    """Return the current DNS record set."""
    try:
        return jsonify(network_manager.get_dns_records())
    except Exception as e:
        logger.error(f"Error getting DNS records: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dns/records', methods=['POST'])
def add_dns_record():
    """Add a DNS record from the JSON body's keyword fields."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(network_manager.add_dns_record(**payload))
    except Exception as e:
        logger.error(f"Error adding DNS record: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dns/records', methods=['DELETE'])
def remove_dns_record():
    """Remove a DNS record identified by the JSON body's keyword fields.

    Previously a missing/invalid body reached `**None` and raised TypeError,
    surfacing as a 500; it now returns 400, matching add_dns_record.
    """
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        result = network_manager.remove_dns_record(**data)
        return jsonify(result)
    except Exception as e:
        logger.error(f"Error removing DNS record: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dhcp/leases', methods=['GET'])
def get_dhcp_leases():
    """Return current DHCP leases."""
    try:
        return jsonify(network_manager.get_dhcp_leases())
    except Exception as e:
        logger.error(f"Error getting DHCP leases: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dhcp/reservations', methods=['POST'])
def add_dhcp_reservation():
    """Add a DHCP reservation (body requires mac and ip; hostname optional)."""
    try:
        payload = request.get_json(silent=True)
        if not payload:
            return jsonify({"error": "No data provided"}), 400
        for field in ('mac', 'ip'):
            if field not in payload:
                return jsonify({"error": f"Missing required field: {field}"}), 400
        created = network_manager.add_dhcp_reservation(
            payload['mac'], payload['ip'], payload.get('hostname', '')
        )
        return jsonify({"success": created})
    except Exception as e:
        logger.error(f"Error adding DHCP reservation: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dhcp/reservations', methods=['DELETE'])
def remove_dhcp_reservation():
    """Remove a DHCP reservation identified by its MAC address."""
    try:
        payload = request.get_json(silent=True)
        if not payload or 'mac' not in payload:
            return jsonify({"error": "Missing required field: mac"}), 400
        removed = network_manager.remove_dhcp_reservation(payload['mac'])
        return jsonify({"success": removed})
    except Exception as e:
        logger.error(f"Error removing DHCP reservation: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/ntp/status', methods=['GET'])
def get_ntp_status():
    """Return NTP service status."""
    try:
        return jsonify(network_manager.get_ntp_status())
    except Exception as e:
        logger.error(f"Error getting NTP status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/network/info', methods=['GET'])
def get_network_info():
    """Get general network info (interfaces, gateway, DNS, etc.)"""
    try:
        return jsonify(network_manager.get_network_info())
    except Exception as e:
        logger.error(f"Error getting network info: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/dns/status', methods=['GET'])
def get_dns_status():
    """Get DNS service status and summary info."""
    try:
        return jsonify(network_manager.get_dns_status())
    except Exception as e:
        logger.error(f"Error getting DNS status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/network/test', methods=['POST'])
def test_network():
    """Run the network connectivity test and return its result."""
    try:
        outcome = network_manager.test_connectivity()
        return jsonify(outcome)
    except Exception as e:
        logger.error(f"Error testing network: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# WireGuard API
|
|
@app.route('/api/wireguard/keys', methods=['GET'])
def get_wireguard_keys():
    """Get WireGuard keys (public key only; private key never leaves the server)."""
    try:
        keypair = wireguard_manager.get_keys()
        response = {
            'public_key': keypair.get('public_key', ''),
            'has_private_key': bool(keypair.get('private_key')),
        }
        return jsonify(response)
    except Exception as e:
        logger.error(f"Error getting WireGuard keys: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/keys/peer', methods=['POST'])
def generate_peer_keys():
    """Generate a key pair for a named peer (body: name or peer_name)."""
    try:
        body = request.get_json(silent=True) or {}
        peer_name = body.get('name') or body.get('peer_name')
        if not peer_name:
            return jsonify({"error": "Missing peer name"}), 400
        return jsonify(wireguard_manager.generate_peer_keys(peer_name))
    except Exception as e:
        logger.error(f"Error generating peer keys: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/config', methods=['GET'])
def get_wireguard_config():
    """Return the WireGuard configuration."""
    try:
        return jsonify(wireguard_manager.get_config())
    except Exception as e:
        logger.error(f"Error getting WireGuard config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers', methods=['GET'])
def get_wireguard_peers():
    """Return the configured WireGuard peers."""
    try:
        return jsonify(wireguard_manager.get_peers())
    except Exception as e:
        logger.error(f"Error getting WireGuard peers: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers', methods=['POST'])
def add_wireguard_peer():
    """Add a WireGuard peer from the JSON body."""
    try:
        body = request.get_json(silent=True) or {}
        added = wireguard_manager.add_peer(
            name=body.get('name', ''),
            public_key=body.get('public_key', ''),
            # accept either 'endpoint' or legacy 'endpoint_ip'
            endpoint_ip=body.get('endpoint', body.get('endpoint_ip', '')),
            allowed_ips=body.get('allowed_ips', ''),
            persistent_keepalive=body.get('persistent_keepalive', 25),
        )
        return jsonify({"success": added})
    except Exception as e:
        logger.error(f"Error adding WireGuard peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers', methods=['DELETE'])
def remove_wireguard_peer():
    """Remove a WireGuard peer (by public_key, or name as fallback)."""
    try:
        body = request.get_json(silent=True) or {}
        peer_key = body.get('public_key') or body.get('name', '')
        removed = wireguard_manager.remove_peer(peer_key)
        return jsonify({"success": removed})
    except Exception as e:
        logger.error(f"Error removing WireGuard peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/status', methods=['GET'])
def get_wireguard_status():
    """Return overall WireGuard status."""
    try:
        return jsonify(wireguard_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting WireGuard status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/connectivity', methods=['POST'])
def test_wireguard_connectivity():
    """Run a WireGuard connectivity test using parameters from the JSON body."""
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(wireguard_manager.test_connectivity(body))
    except Exception as e:
        logger.error(f"Error testing WireGuard connectivity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers/ip', methods=['PUT'])
def update_peer_ip():
    """Update a peer's IP (body: public_key or peer, and ip)."""
    try:
        body = request.get_json(silent=True) or {}
        peer_key = body.get('public_key', body.get('peer', ''))
        new_ip = body.get('ip', '')
        updated = wireguard_manager.update_peer_ip(peer_key, new_ip)
        return jsonify({"success": updated})
    except Exception as e:
        logger.error(f"Error updating peer IP: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers/status', methods=['POST'])
def get_peer_status():
    """Get live WireGuard status for a single peer (body: public_key)."""
    try:
        body = request.get_json(silent=True) or {}
        peer_key = body.get('public_key', '')
        if not peer_key:
            return jsonify({"error": "Missing public_key"}), 400
        return jsonify(wireguard_manager.get_peer_status(peer_key))
    except Exception as e:
        logger.error(f"Error getting peer status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers/statuses', methods=['GET'])
def get_all_peer_statuses():
    """Get live WireGuard status for all peers (keyed by public_key)."""
    try:
        return jsonify(wireguard_manager.get_all_peer_statuses())
    except Exception as e:
        logger.error(f"Error getting peer statuses: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/network/setup', methods=['POST'])
def setup_network():
    """Setup network configuration for internet access."""
    try:
        if wireguard_manager.setup_network_configuration():
            return jsonify({"message": "Network configuration setup completed successfully"})
        return jsonify({"error": "Failed to setup network configuration"}), 500
    except Exception as e:
        logger.error(f"Error setting up network configuration: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/network/status', methods=['GET'])
def get_network_status():
    """Get network configuration status."""
    try:
        return jsonify(wireguard_manager.get_network_status())
    except Exception as e:
        logger.error(f"Error getting network status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/peers/config', methods=['POST'])
def get_peer_config():
    """Render a client tunnel config, filling gaps from the registry and server.

    Missing ip/private_key fall back to the peer's registry entry; a missing
    server endpoint falls back to the server's detected external endpoint.
    """
    try:
        data = request.get_json(silent=True) or {}
        peer_name = data.get('name', data.get('peer', ''))

        peer_ip = data.get('ip', '')
        peer_private_key = data.get('private_key', '')
        registered = peer_registry.get_peer(peer_name) if peer_name else {}
        # Fall back to registry values only when the caller omitted them.
        if peer_name and (not peer_ip or not peer_private_key):
            if registered:
                peer_ip = peer_ip or registered.get('ip', '')
                peer_private_key = peer_private_key or registered.get('private_key', '')

        # Prefer the server's real external endpoint when none was supplied.
        server_endpoint = data.get('server_endpoint', '')
        if not server_endpoint:
            server_endpoint = wireguard_manager.get_server_config().get('endpoint') or '<SERVER_IP>'

        # AllowedIPs precedence: explicit > stored internet_access flag > full tunnel.
        allowed_ips = data.get('allowed_ips') or None
        if not allowed_ips and registered:
            if registered.get('internet_access', True):
                allowed_ips = wireguard_manager.FULL_TUNNEL_IPS
            else:
                allowed_ips = wireguard_manager.get_split_tunnel_ips()

        rendered = wireguard_manager.get_peer_config(
            peer_name=peer_name,
            peer_ip=peer_ip,
            peer_private_key=peer_private_key,
            server_endpoint=server_endpoint,
            allowed_ips=allowed_ips,
        )
        return jsonify({"config": rendered})
    except Exception as e:
        logger.error(f"Error getting peer config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/server-config', methods=['GET'])
def get_server_config():
    """Return the server-side WireGuard configuration summary."""
    try:
        return jsonify(wireguard_manager.get_server_config())
    except Exception as e:
        logger.error(f"Error getting server config: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/refresh-ip', methods=['POST'])
def refresh_external_ip():
    """Force re-detection of the server's external IP and report the endpoint."""
    try:
        ip = wireguard_manager.get_external_ip(force_refresh=True)
        port = wireguard_manager._get_configured_port()
        endpoint = f'{ip}:{port}' if ip else None
        return jsonify({'external_ip': ip, 'port': port, 'endpoint': endpoint})
    except Exception as e:
        logger.error(f"Error refreshing external IP: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/apply-enforcement', methods=['POST'])
def apply_wireguard_enforcement():
    """Re-apply per-peer iptables and DNS enforcement rules (call after WireGuard restart)."""
    try:
        registered = peer_registry.list_peers()
        firewall_manager.apply_all_peer_rules(registered)
        firewall_manager.apply_all_dns_rules(registered, COREFILE_PATH, _configured_domain())
        return jsonify({'ok': True, 'peers': len(registered)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/wireguard/check-port', methods=['POST'])
def check_wireguard_port():
    """Probe whether the configured WireGuard port is externally reachable."""
    try:
        is_open = wireguard_manager.check_port_open()
        configured = wireguard_manager._get_configured_port()
        return jsonify({'port_open': is_open, 'port': configured})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
|
|
|
|
# ── Cell-to-cell connections ─────────────────────────────────────────────────
|
|
|
|
@app.route('/api/cells/invite', methods=['GET'])
def get_cell_invite():
    """Generate an invite package for this cell."""
    try:
        # Identity comes from config, with environment-variable fallbacks.
        identity = config_manager.configs.get('_identity', {})
        name = identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
        dom = identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
        return jsonify(cell_link_manager.generate_invite(name, dom))
    except Exception as e:
        logger.error(f"Error generating cell invite: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/cells', methods=['GET'])
def list_cell_connections():
    """List all connected cells."""
    try:
        connections = cell_link_manager.list_connections()
        return jsonify(connections)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/cells', methods=['POST'])
def add_cell_connection():
    """Connect to a remote cell using their invite package."""
    try:
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No data provided'}), 400
        # Report the first missing required field, matching field order.
        missing = [f for f in ('cell_name', 'public_key', 'vpn_subnet', 'dns_ip', 'domain')
                   if f not in data]
        if missing:
            return jsonify({'error': f'Missing field: {missing[0]}'}), 400
        link = cell_link_manager.add_connection(data)
        return jsonify({'message': f"Connected to cell '{data['cell_name']}'", 'link': link}), 201
    except ValueError as e:
        return jsonify({'error': str(e)}), 400
    except Exception as e:
        logger.error(f"Error adding cell connection: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/cells/<cell_name>', methods=['DELETE'])
def remove_cell_connection(cell_name):
    """Disconnect from a remote cell; 404 when the link is unknown."""
    try:
        cell_link_manager.remove_connection(cell_name)
        return jsonify({'message': f"Cell '{cell_name}' disconnected"})
    except ValueError as e:
        # Unknown cell name.
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        logger.error(f"Error removing cell connection: {e}")
        return jsonify({'error': str(e)}), 500
|
|
|
|
@app.route('/api/cells/<cell_name>/status', methods=['GET'])
def get_cell_connection_status(cell_name):
    """Get live status for a connected cell; 404 when the link is unknown."""
    try:
        return jsonify(cell_link_manager.get_connection_status(cell_name))
    except ValueError as e:
        return jsonify({'error': str(e)}), 404
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
|
|
# Peer Registry API
|
|
@app.route('/api/peers', methods=['GET'])
def get_peers():
    """Return every registered peer."""
    try:
        return jsonify(peer_registry.list_peers())
    except Exception as e:
        logger.error(f"Error getting peers: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
def _next_peer_ip() -> str:
    """Auto-assign the next free host address from the configured VPN subnet."""
    import ipaddress
    server_addr = wireguard_manager._get_configured_address()  # e.g. '10.0.0.1/24'
    network = ipaddress.ip_network(server_addr, strict=False)
    own_ip = str(ipaddress.ip_interface(server_addr).ip)
    # Addresses already handed out (strip any /CIDR suffix for comparison).
    taken = {p.get('ip', '').split('/')[0] for p in peer_registry.list_peers()}
    for candidate in network.hosts():
        addr = str(candidate)
        if addr != own_ip and addr not in taken:
            return addr
    raise ValueError(f'No free IPs left in {network}')
|
|
|
|
|
|
@app.route('/api/peers', methods=['POST'])
def add_peer():
    """Add a peer and auto-provision auth/email/calendar/files accounts.

    Request JSON: name and public_key are required; password (min 10 chars)
    is required for account provisioning; ip is auto-assigned when omitted.
    Returns 201 with the assigned IP on success, 400 on validation failure
    or duplicate peer, 500 on unexpected errors.
    """
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400

        # Validate required fields (ip is optional — auto-assigned if omitted)
        required_fields = ['name', 'public_key']
        for field in required_fields:
            if field not in data:
                return jsonify({"error": f"Missing required field: {field}"}), 400

        # Password is required for peer provisioning
        password = data.get('password') or ''
        if not password:
            return jsonify({"error": "Missing required field: password"}), 400
        if len(password) < 10:
            return jsonify({"error": "password must be at least 10 characters"}), 400

        # Auto-assign from the VPN subnet when the caller did not pick an IP.
        assigned_ip = data.get('ip') or _next_peer_ip()

        # Validate service_access if provided; defaults to all services.
        _valid_services = {'calendar', 'files', 'mail', 'webdav'}
        service_access = data.get('service_access', list(_valid_services))
        if not isinstance(service_access, list) or not all(s in _valid_services for s in service_access):
            return jsonify({"error": f"service_access must be a list of: {sorted(_valid_services)}"}), 400

        peer_name = data['name']

        # --- Provision auth account (hard-required) ---
        if not auth_manager.create_user(peer_name, password, 'peer'):
            return jsonify({"error": f"Could not create auth account (duplicate name?)"}), 400

        # --- Provision service accounts (best-effort; failures logged but non-fatal) ---
        provisioned = ['auth']
        domain = _configured_domain()
        for step_name, step_fn in [
            ('email', lambda: email_manager.create_email_user(peer_name, domain, password)),
            ('calendar', lambda: calendar_manager.create_calendar_user(peer_name, password)),
            ('files', lambda: file_manager.create_user(peer_name, password)),
        ]:
            try:
                if step_fn():
                    provisioned.append(step_name)
                else:
                    # A False return means the backing service refused/was unready.
                    logger.warning(f"Peer {peer_name}: {step_name} account creation returned False (service may not be ready)")
            except Exception as e:
                logger.warning(f"Peer {peer_name}: {step_name} account creation failed (non-fatal): {e}")

        # Add peer to registry with all provided fields
        peer_info = {
            'peer': peer_name,
            'ip': assigned_ip,
            'public_key': data['public_key'],
            'private_key': data.get('private_key'),
            'server_public_key': data.get('server_public_key'),
            'server_endpoint': data.get('server_endpoint'),
            'allowed_ips': data.get('allowed_ips'),
            'persistent_keepalive': data.get('persistent_keepalive'),
            'description': data.get('description'),
            'internet_access': data.get('internet_access', True),
            'service_access': service_access,
            'peer_access': data.get('peer_access', True),
            'config_needs_reinstall': False,
        }

        success = peer_registry.add_peer(peer_info)
        if success:
            # Add peer to WireGuard server config (non-fatal if WG is not running)
            wg_allowed = f"{assigned_ip}/32" if '/' not in assigned_ip else assigned_ip
            try:
                wireguard_manager.add_peer(peer_name, data['public_key'], endpoint_ip='', allowed_ips=wg_allowed)
            except Exception as wg_err:
                logger.warning(f"Peer {peer_name}: WireGuard server config update failed (non-fatal): {wg_err}")
            # Apply server-side enforcement immediately
            firewall_manager.apply_peer_rules(peer_info['ip'], peer_info)
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain())
            return jsonify({"message": f"Peer {peer_name} added successfully", "ip": assigned_ip}), 201
        else:
            # Registry rejected (already exists) — rollback provisioned accounts
            # in reverse provisioning order so auth is removed last.
            for svc in ('files', 'calendar', 'email', 'auth'):
                try:
                    if svc == 'files':
                        file_manager.delete_user(peer_name)
                    elif svc == 'calendar':
                        calendar_manager.delete_calendar_user(peer_name)
                    elif svc == 'email':
                        email_manager.delete_email_user(peer_name)
                    elif svc == 'auth':
                        auth_manager.delete_user(peer_name)
                except Exception:
                    # Best-effort rollback: one failed deletion must not stop the rest.
                    pass
            return jsonify({"error": f"Peer {peer_name} already exists"}), 400

    except Exception as e:
        logger.error(f"Error adding peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
@app.route('/api/peers/<peer_name>', methods=['PUT'])
def update_peer(peer_name):
    """Update peer settings. Marks config_needs_reinstall if VPN config changed."""
    try:
        data = request.get_json(silent=True) or {}
        existing = peer_registry.get_peer(peer_name)
        if not existing:
            return jsonify({"error": "Peer not found"}), 404

        # Any change to a tunnel-relevant field means the client must
        # download and reinstall a fresh config.
        config_changed = any([
            'internet_access' in data and data['internet_access'] != existing.get('internet_access', True),
            'ip' in data and data['ip'] != existing.get('ip'),
            'persistent_keepalive' in data and data['persistent_keepalive'] != existing.get('persistent_keepalive'),
        ])

        updates = dict(data)
        if config_changed:
            updates['config_needs_reinstall'] = True

        if not peer_registry.update_peer(peer_name, updates):
            return jsonify({"error": "Update failed"}), 500

        # Re-apply server-side enforcement with the freshly stored settings.
        refreshed = peer_registry.get_peer(peer_name)
        if refreshed:
            firewall_manager.apply_peer_rules(refreshed['ip'], refreshed)
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain())
        return jsonify({"message": f"Peer {peer_name} updated", "config_changed": config_changed})
    except Exception as e:
        logger.error(f"Error updating peer {peer_name}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
@app.route('/api/peers/<peer_name>/clear-reinstall', methods=['POST'])
def clear_peer_reinstall(peer_name):
    """Clear the config_needs_reinstall flag once user has downloaded new config.

    Returns a confirmation message, or a 500 with the error text on failure.
    """
    try:
        peer_registry.clear_reinstall_flag(peer_name)
        return jsonify({"message": "Reinstall flag cleared"})
    except Exception as e:
        # Log the failure like every other peer endpoint does, so errors
        # here are traceable instead of only surfacing in the response body.
        logger.error(f"Error clearing reinstall flag for {peer_name}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
@app.route('/api/peers/<peer_name>', methods=['DELETE'])
def remove_peer(peer_name):
    """Remove a peer and clean up firewall, DNS, and all service accounts.

    Idempotent: removing an unknown peer still returns 200 with a message.
    """
    try:
        peer = peer_registry.get_peer(peer_name)
        if not peer:
            return jsonify({"message": f"Peer {peer_name} not found or already removed"})
        # Capture fields needed for cleanup before the registry entry is gone.
        peer_ip = peer.get('ip')
        peer_pubkey = peer.get('public_key', '')
        success = peer_registry.remove_peer(peer_name)
        if success:
            if peer_ip:
                firewall_manager.clear_peer_rules(peer_ip)
            # Regenerate DNS rules from the now-shorter peer list.
            firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain())
            # Remove peer from WireGuard server config (non-fatal)
            if peer_pubkey:
                try:
                    wireguard_manager.remove_peer(peer_pubkey)
                except Exception as wg_err:
                    logger.warning(f"Peer {peer_name}: WireGuard removal failed (non-fatal): {wg_err}")
            # Clean up all provisioned service accounts (best-effort)
            for _cleanup in [
                lambda: email_manager.delete_email_user(peer_name),
                lambda: calendar_manager.delete_calendar_user(peer_name),
                lambda: file_manager.delete_user(peer_name),
                lambda: auth_manager.delete_user(peer_name),
            ]:
                try:
                    _cleanup()
                except Exception:
                    # Best-effort: a missing account on one service must not
                    # block cleanup of the others.
                    pass
        return jsonify({"message": f"Peer {peer_name} removed successfully"})
    except Exception as e:
        logger.error(f"Error removing peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/peers/register', methods=['POST'])
def register_peer():
    """Register a peer from its self-submitted registration payload."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(peer_registry.register_peer(payload))
    except Exception as e:
        logger.error(f"Error registering peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/peers/<peer_name>/unregister', methods=['DELETE'])
def unregister_peer(peer_name):
    """Unregister a peer."""
    try:
        return jsonify(peer_registry.unregister_peer(peer_name))
    except Exception as e:
        logger.error(f"Error unregistering peer: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/peers/<peer_name>/update-ip', methods=['PUT'])
def update_peer_ip_registry(peer_name):
    """Update a peer's IP in the registry and propagate it to routing.

    Body: {"ip": "<new address>"}. Returns 400 when ip is missing,
    404 when the peer is unknown.
    """
    try:
        data = request.get_json(silent=True)
        new_ip = data.get('ip') if data else None
        if not new_ip:
            return jsonify({"error": "Missing ip"}), 400
        if not peer_registry.update_peer_ip(peer_name, new_ip):
            return jsonify({"error": f"Peer {peer_name} not found"}), 404
        # Propagate to the routing layer (best-effort, non-fatal).
        try:
            routing_manager.update_peer_ip(peer_name, new_ip)
        except Exception as e:
            logger.warning(f"RoutingManager update_peer_ip failed: {e}")
        # WireGuard-side IP update is not implemented yet; the dead
        # try/except that used to wrap this warning has been removed.
        logger.warning(f"WireGuardManager update_peer_ip not implemented yet")
        return jsonify({"message": f"IP update received for {peer_name}"})
    except Exception as e:
        logger.error(f"Error updating peer IP: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/ip-update', methods=['POST'])
def ip_update():
    """Handle IP update from peer.

    Body: {"peer": <name>, "ip": <new address>}. Returns 400 when either
    field is missing, 404 when the peer is unknown.
    """
    try:
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400
        peer_name = data.get('peer')
        new_ip = data.get('ip')
        if not peer_name or not new_ip:
            return jsonify({"error": "Missing peer or ip"}), 400
        if not peer_registry.update_peer_ip(peer_name, new_ip):
            return jsonify({"error": f"Peer {peer_name} not found"}), 404
        # Propagate to the routing layer (best-effort, non-fatal).
        try:
            routing_manager.update_peer_ip(peer_name, new_ip)
        except Exception as e:
            logger.warning(f"RoutingManager update_peer_ip failed: {e}")
        # WireGuard-side IP update is not implemented yet; the dead
        # try/except that used to wrap this warning has been removed.
        logger.warning(f"WireGuardManager update_peer_ip not implemented yet")
        return jsonify({"message": f"IP update received for {peer_name}"})
    except Exception as e:
        logger.error(f"Error handling IP update: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Email Services API
|
|
@app.route('/api/email/users', methods=['GET'])
def get_email_users():
    """Get email users."""
    try:
        return jsonify(email_manager.get_users())
    except Exception as e:
        logger.error(f"Error getting email users: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/users', methods=['POST'])
def create_email_user():
    """Create email user."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(email_manager.create_user(payload))
    except Exception as e:
        logger.error(f"Error creating email user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/users/<username>', methods=['DELETE'])
def delete_email_user(username):
    """Delete email user."""
    try:
        return jsonify(email_manager.delete_user(username))
    except Exception as e:
        logger.error(f"Error deleting email user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/status', methods=['GET'])
def get_email_status():
    """Get email service status."""
    try:
        return jsonify(email_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting email status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/connectivity', methods=['GET'])
def test_email_connectivity():
    """Test email connectivity."""
    try:
        return jsonify(email_manager.test_connectivity())
    except Exception as e:
        logger.error(f"Error testing email connectivity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/send', methods=['POST'])
def send_email():
    """Send an email via the mail service from the posted payload."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(email_manager.send_email(payload))
    except Exception as e:
        logger.error(f"Error sending email: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/email/mailbox/<username>', methods=['GET'])
def get_mailbox_info(username):
    """Get mailbox information."""
    try:
        return jsonify(email_manager.get_mailbox_info(username))
    except Exception as e:
        logger.error(f"Error getting mailbox info: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Calendar Services API
|
|
@app.route('/api/calendar/users', methods=['GET'])
def get_calendar_users():
    """Get calendar users."""
    try:
        return jsonify(calendar_manager.get_users())
    except Exception as e:
        logger.error(f"Error getting calendar users: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/users', methods=['POST'])
def create_calendar_user():
    """Create calendar user."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(calendar_manager.create_user(payload))
    except Exception as e:
        logger.error(f"Error creating calendar user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/users/<username>', methods=['DELETE'])
def delete_calendar_user(username):
    """Delete calendar user."""
    try:
        return jsonify(calendar_manager.delete_user(username))
    except Exception as e:
        logger.error(f"Error deleting calendar user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/calendars', methods=['POST'])
def create_calendar():
    """Create calendar."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(calendar_manager.create_calendar(payload))
    except Exception as e:
        logger.error(f"Error creating calendar: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/events', methods=['POST'])
def add_calendar_event():
    """Add an event to a calendar from the posted payload."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(calendar_manager.add_event(payload))
    except Exception as e:
        logger.error(f"Error adding calendar event: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/events/<username>/<calendar_name>', methods=['GET'])
def get_calendar_events(username, calendar_name):
    """Get calendar events."""
    try:
        # Query-string parameters act as event filters.
        filters = request.args.to_dict()
        return jsonify(calendar_manager.get_events(username, calendar_name, filters))
    except Exception as e:
        logger.error(f"Error getting calendar events: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/status', methods=['GET'])
def get_calendar_status():
    """Get calendar service status."""
    try:
        return jsonify(calendar_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting calendar status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/calendar/connectivity', methods=['GET'])
def test_calendar_connectivity():
    """Test calendar connectivity."""
    try:
        return jsonify(calendar_manager.test_connectivity())
    except Exception as e:
        logger.error(f"Error testing calendar connectivity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# File Services API
|
|
@app.route('/api/files/users', methods=['GET'])
def get_file_users():
    """Get file storage users."""
    try:
        return jsonify(file_manager.get_users())
    except Exception as e:
        logger.error(f"Error getting file users: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/users', methods=['POST'])
def create_file_user():
    """Create file storage user."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(file_manager.create_user(payload))
    except Exception as e:
        logger.error(f"Error creating file user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/users/<username>', methods=['DELETE'])
def delete_file_user(username):
    """Delete file storage user."""
    try:
        return jsonify(file_manager.delete_user(username))
    except Exception as e:
        logger.error(f"Error deleting file user: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/folders', methods=['POST'])
def create_folder():
    """Create folder; ValueError from the manager maps to 400."""
    try:
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        return jsonify(file_manager.create_folder(payload))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error creating folder: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/folders/<username>/<path:folder_path>', methods=['DELETE'])
def delete_folder(username, folder_path):
    """Delete folder; ValueError from the manager maps to 400."""
    try:
        return jsonify(file_manager.delete_folder(username, folder_path))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error deleting folder: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/upload/<username>', methods=['POST'])
def upload_file(username):
    """Upload file from a multipart form ('file' part, optional 'path')."""
    try:
        uploaded = request.files.get('file')
        if uploaded is None:
            return jsonify({"error": "No file provided"}), 400
        target_path = request.form.get('path', '')
        return jsonify(file_manager.upload_file(username, uploaded, target_path))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error uploading file: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/download/<username>/<path:file_path>', methods=['GET'])
def download_file(username, file_path):
    """Download file; ValueError from the manager maps to 400."""
    try:
        return jsonify(file_manager.download_file(username, file_path))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error downloading file: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/delete/<username>/<path:file_path>', methods=['DELETE'])
def delete_file(username, file_path):
    """Delete file; ValueError from the manager maps to 400."""
    try:
        return jsonify(file_manager.delete_file(username, file_path))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error deleting file: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/list/<username>', methods=['GET'])
def list_files(username):
    """List files, optionally scoped to a ?folder= query parameter."""
    try:
        target_folder = request.args.get('folder', '')
        return jsonify(file_manager.list_files(username, target_folder))
    except ValueError as e:
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        logger.error(f"Error listing files: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/status', methods=['GET'])
def get_file_status():
    """Get file service status."""
    try:
        return jsonify(file_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting file status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/files/connectivity', methods=['GET'])
def test_file_connectivity():
    """Test file service connectivity."""
    try:
        return jsonify(file_manager.test_connectivity())
    except Exception as e:
        logger.error(f"Error testing file connectivity: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
# Routing API
|
|
@app.route('/api/routing/status', methods=['GET'])
def get_routing_status():
    """Get routing status."""
    try:
        return jsonify(routing_manager.get_status())
    except Exception as e:
        logger.error(f"Error getting routing status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/setup', methods=['POST'])
def setup_routing():
    """Apply/verify routing setup (WireGuard handles NAT via PostUp rules)."""
    try:
        current = routing_manager.get_status()
        payload = {'success': True, 'message': 'Routing managed by WireGuard PostUp rules', **current}
        return jsonify(payload)
    except Exception as e:
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/nat', methods=['POST'])
def add_nat_rule():
    """Add NAT rule.

    JSON fields:
    - source_network (CIDR)
    - target_interface (str)
    - masquerade (bool, default True)
    - nat_type (MASQUERADE, SNAT, DNAT)
    - protocol (TCP, UDP, ALL)
    - external_port (str, optional)
    - internal_ip (str, optional)
    - internal_port (str, optional)
    """
    try:
        body = request.get_json(silent=True) or {}
        ok = routing_manager.add_nat_rule(
            source_network=body.get('source_network'),
            target_interface=body.get('target_interface'),
            masquerade=body.get('masquerade', True),
            nat_type=body.get('nat_type', 'MASQUERADE'),
            protocol=body.get('protocol', 'ALL'),
            external_port=body.get('external_port'),
            internal_ip=body.get('internal_ip'),
            internal_port=body.get('internal_port')
        )
        return jsonify({'success': ok})
    except Exception as e:
        logger.error(f"Error adding NAT rule: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/nat/<rule_id>', methods=['DELETE'])
def remove_nat_rule(rule_id):
    """Remove NAT rule."""
    try:
        return jsonify(routing_manager.remove_nat_rule(rule_id))
    except Exception as e:
        logger.error(f"Error removing NAT rule: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/peers', methods=['POST'])
def add_peer_route():
    """Add peer route."""
    try:
        payload = request.get_json(silent=True)
        return jsonify(routing_manager.add_peer_route(payload))
    except Exception as e:
        logger.error(f"Error adding peer route: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/peers/<peer_name>', methods=['DELETE'])
def remove_peer_route(peer_name):
    """Remove peer route."""
    try:
        return jsonify(routing_manager.remove_peer_route(peer_name))
    except Exception as e:
        logger.error(f"Error removing peer route: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/exit-nodes', methods=['POST'])
def add_exit_node():
    """Add exit node."""
    try:
        payload = request.get_json(silent=True)
        return jsonify(routing_manager.add_exit_node(payload))
    except Exception as e:
        logger.error(f"Error adding exit node: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/bridge', methods=['POST'])
def add_bridge_route():
    """Add bridge route."""
    try:
        payload = request.get_json(silent=True)
        return jsonify(routing_manager.add_bridge_route(payload))
    except Exception as e:
        logger.error(f"Error adding bridge route: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/routing/split', methods=['POST'])
def add_split_route():
    """Create a split route from the posted JSON body."""
    try:
        body = request.get_json(silent=True)
        return jsonify(routing_manager.add_split_route(body))
    except Exception as exc:
        logger.error(f"Error adding split route: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/firewall', methods=['POST'])
def add_firewall_rule():
    """Add firewall rule.

    JSON fields:
    - rule_type (INPUT, OUTPUT, FORWARD)
    - source (CIDR)
    - destination (CIDR)
    - action (ACCEPT, DROP, REJECT)
    - protocol (TCP, UDP, ICMP, ALL)
    - port (str, optional)
    - port_range (str, optional, e.g. '1000-2000')
    """
    try:
        body = request.get_json(silent=True) or {}
        created = routing_manager.add_firewall_rule(
            rule_type=body.get('rule_type'),
            source=body.get('source'),
            destination=body.get('destination'),
            action=body.get('action', 'ACCEPT'),
            port=body.get('port'),
            protocol=body.get('protocol', 'ALL'),
            port_range=body.get('port_range'),
        )
        return jsonify({'success': created})
    except Exception as exc:
        logger.error(f"Error adding firewall rule: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/firewall/<rule_id>', methods=['DELETE'])
def remove_firewall_rule(rule_id):
    """Delete a firewall rule; responds 404 when the id is unknown."""
    try:
        removed = routing_manager.remove_firewall_rule(rule_id)
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
    status_code = 200 if removed else 404
    return jsonify({'success': removed}), status_code
|
|
|
|
@app.route('/api/routing/live-iptables', methods=['GET'])
def get_live_iptables():
    """Return the live iptables state reported by the routing manager."""
    try:
        snapshot = routing_manager.get_live_iptables()
    except Exception as exc:
        return jsonify({'error': str(exc)}), 500
    return jsonify(snapshot)
|
|
|
|
@app.route('/api/routing/connectivity', methods=['POST'])
def test_routing_connectivity():
    """Test routing connectivity to target_ip, optionally via a peer."""
    try:
        body = request.get_json(silent=True) or {}
        outcome = routing_manager.test_routing_connectivity(
            body.get('target_ip', '8.8.8.8'),
            body.get('via_peer'),
        )
        return jsonify(outcome)
    except Exception as exc:
        logger.error(f"Error testing routing connectivity: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/logs', methods=['GET'])
def get_routing_logs():
    """Return the most recent routing log entries (default 50 lines)."""
    try:
        line_count = request.args.get('lines', 50, type=int)
        return jsonify(routing_manager.get_logs(line_count))
    except Exception as exc:
        logger.error(f"Error getting routing logs: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/nat', methods=['GET'])
def get_nat_rules():
    """Return every configured NAT rule."""
    try:
        return jsonify({"nat_rules": routing_manager.get_nat_rules()})
    except Exception as exc:
        logger.error(f"Error getting NAT rules: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/peers', methods=['GET'])
def get_peer_routes():
    """Return every configured peer route."""
    try:
        return jsonify({"peer_routes": routing_manager.get_peer_routes()})
    except Exception as exc:
        logger.error(f"Error getting peer routes: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/routing/firewall', methods=['GET'])
def get_firewall_rules():
    """Return every configured firewall rule."""
    try:
        return jsonify({"firewall_rules": routing_manager.get_firewall_rules()})
    except Exception as exc:
        logger.error(f"Error getting firewall rules: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
# Vault & Trust API (Phase 6)
|
|
@app.route('/api/vault/status', methods=['GET'])
def get_vault_status():
    """Report the vault manager's current status."""
    try:
        return jsonify(current_app.vault_manager.get_status())
    except Exception as exc:
        logger.error(f"Error getting vault status: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/certificates', methods=['GET'])
def get_certificates():
    """List every certificate known to the vault manager."""
    try:
        return jsonify(current_app.vault_manager.list_certificates())
    except Exception as exc:
        logger.error(f"Error getting certificates: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/certificates', methods=['POST'])
def generate_certificate():
    """Generate a certificate.

    Required JSON field: common_name.
    Optional: domains (list), key_size (default 2048), days (default 365).
    """
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        issued = current_app.vault_manager.generate_certificate(
            common_name=body['common_name'],
            domains=body.get('domains', []),
            key_size=body.get('key_size', 2048),
            days=body.get('days', 365),
        )
        return jsonify(issued)
    except Exception as exc:
        logger.error(f"Error generating certificate: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/certificates/<common_name>', methods=['DELETE'])
def revoke_certificate(common_name):
    """Revoke the certificate for the given common name."""
    try:
        revoked = current_app.vault_manager.revoke_certificate(common_name)
    except Exception as exc:
        logger.error(f"Error revoking certificate: {exc}")
        return jsonify({"error": str(exc)}), 500
    return jsonify({"revoked": revoked})
|
|
|
|
@app.route('/api/vault/ca/certificate', methods=['GET'])
def get_ca_certificate():
    """Return the CA certificate in PEM/text form."""
    try:
        ca_cert = current_app.vault_manager.get_ca_certificate()
    except Exception as exc:
        logger.error(f"Error getting CA certificate: {exc}")
        return jsonify({"error": str(exc)}), 500
    return jsonify({"certificate": ca_cert})
|
|
|
|
@app.route('/api/vault/age/public-key', methods=['GET'])
def get_age_public_key():
    """Return the vault's Age public key."""
    try:
        pub_key = current_app.vault_manager.get_age_public_key()
    except Exception as exc:
        logger.error(f"Error getting Age public key: {exc}")
        return jsonify({"error": str(exc)}), 500
    return jsonify({"public_key": pub_key})
|
|
|
|
@app.route('/api/vault/trust/keys', methods=['GET'])
def get_trusted_keys():
    """Return the set of trusted keys."""
    try:
        return jsonify(current_app.vault_manager.get_trusted_keys())
    except Exception as exc:
        logger.error(f"Error getting trusted keys: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/trust/keys', methods=['POST'])
def add_trusted_key():
    """Register a trusted key.

    Required JSON fields: name, public_key.
    Optional: trust_level (default 'direct').
    """
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        added = current_app.vault_manager.add_trusted_key(
            name=body['name'],
            public_key=body['public_key'],
            trust_level=body.get('trust_level', 'direct'),
        )
        return jsonify({"added": added})
    except Exception as exc:
        logger.error(f"Error adding trusted key: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/trust/keys/<name>', methods=['DELETE'])
def remove_trusted_key(name):
    """Remove a trusted key by name."""
    try:
        removed = current_app.vault_manager.remove_trusted_key(name)
    except Exception as exc:
        logger.error(f"Error removing trusted key: {exc}")
        return jsonify({"error": str(exc)}), 500
    return jsonify({"removed": removed})
|
|
|
|
@app.route('/api/vault/trust/verify', methods=['POST'])
def verify_trust_chain():
    """Verify a signature against the trust chain.

    Required JSON fields: peer_name, signature, data.
    """
    try:
        body = request.get_json(silent=True)
        if body is None:
            return jsonify({"error": "No data provided"}), 400
        verified = current_app.vault_manager.verify_trust_chain(
            peer_name=body['peer_name'],
            signature=body['signature'],
            data=body['data'],
        )
        return jsonify({"verified": verified})
    except Exception as exc:
        logger.error(f"Error verifying trust chain: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/vault/trust/chains', methods=['GET'])
def get_trust_chains():
    """Return the known trust chains."""
    try:
        return jsonify(current_app.vault_manager.get_trust_chains())
    except Exception as exc:
        logger.error(f"Error getting trust chains: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
# Services API
|
|
@app.route('/api/services/status', methods=['GET'])
def get_all_services_status():
    """Get status of all services.

    Queries every service registered on the service bus, normalizes each
    status dict for UI consumption, and returns one JSON object keyed by
    service name. A failing individual service produces an 'offline' entry
    instead of failing the whole request.
    """
    # Per-service extra metrics as (response_key, source_key, default).
    # Replaces a long duplicated if/elif chain with a data-driven table;
    # output is identical to the previous branch-per-service code.
    metric_map = {
        'network': (
            ('dns_status', 'dns_running', False),
            ('dhcp_status', 'dhcp_running', False),
            ('ntp_status', 'ntp_running', False),
        ),
        'wireguard': (
            ('peers_count', 'peers_count', 0),
            ('interface', 'interface', 'unknown'),
        ),
        'email': (
            ('users_count', 'users_count', 0),
            ('domain', 'domain', 'unknown'),
        ),
        'calendar': (
            ('users_count', 'users_count', 0),
            ('calendars_count', 'calendars_count', 0),
        ),
        'files': (
            ('users_count', 'users_count', 0),
            ('storage_used', 'total_storage_used', {}),
        ),
        'routing': (
            ('nat_rules_count', 'nat_rules_count', 0),
            ('peer_routes_count', 'peer_routes_count', 0),
            ('firewall_rules_count', 'firewall_rules_count', 0),
        ),
        'vault': (
            ('certificates_count', 'certificates_count', 0),
            ('trusted_keys_count', 'trusted_keys_count', 0),
        ),
    }
    try:
        services_status = {}
        for service_name in service_bus.list_services():
            try:
                service = service_bus.get_service(service_name)
                status = service.get_status()

                if isinstance(status, dict):
                    # Core fields common to every service.
                    clean_status = {
                        'status': status.get('status', 'unknown'),
                        'running': status.get('running', False),
                        'timestamp': status.get('timestamp', datetime.utcnow().isoformat()),
                    }
                    # Service-specific metrics from the table above.
                    for out_key, src_key, default in metric_map.get(service_name, ()):
                        clean_status[out_key] = status.get(src_key, default)
                    services_status[service_name] = clean_status
                else:
                    # Non-dict status: expose it as a string plus truthiness.
                    services_status[service_name] = {'status': str(status), 'running': bool(status)}

            except Exception as e:
                services_status[service_name] = {'error': str(e), 'status': 'offline', 'running': False}

        return jsonify({
            "network": services_status.get('network', {}),
            "wireguard": services_status.get('wireguard', {}),
            "email": services_status.get('email', {}),
            "calendar": services_status.get('calendar', {}),
            "files": services_status.get('files', {}),
            "routing": services_status.get('routing', {}),
            "vault": services_status.get('vault', {}),
            "timestamp": datetime.utcnow().isoformat()
        })
    except Exception as e:
        logger.error(f"Error getting all services status: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/services/connectivity', methods=['GET'])
def test_all_services_connectivity():
    """Run the connectivity test of every service on the service bus."""
    try:
        results = {}
        for service_name in service_bus.list_services():
            try:
                service = service_bus.get_service(service_name)
                if hasattr(service, 'test_connectivity'):
                    results[service_name] = service.test_connectivity()
                else:
                    results[service_name] = {'status': 'ok', 'message': 'No connectivity test available'}
            except Exception as exc:
                results[service_name] = {'status': 'error', 'message': str(exc)}

        response = {
            name: results.get(name, {})
            for name in ('network', 'wireguard', 'email', 'calendar', 'files', 'routing')
        }
        response["timestamp"] = datetime.utcnow().isoformat()
        return jsonify(response)
    except Exception as exc:
        logger.error(f"Error testing all services connectivity: {exc}")
        return jsonify({"error": str(exc)}), 500
|
|
|
|
@app.route('/api/health/history', methods=['GET'])
def get_health_history():
    """Return the recent unified health check results as a JSON array."""
    snapshot = list(health_history)
    return jsonify(snapshot)
|
|
|
|
@app.route('/api/health/history/clear', methods=['POST'])
def clear_health_history():
    """Clear health history and reset the per-service alert counters."""
    global service_alert_counters
    service_alert_counters = {}
    health_history.clear()
    return jsonify({'message': 'Health history cleared'})
|
|
|
|
@app.route('/api/logs', methods=['GET'])
def get_backend_logs():
    """Get backend log file contents (last N lines).

    Query params:
        lines: number of trailing lines to return (default 100);
               a non-positive value returns the whole file.
    """
    log_file = os.path.join(os.path.dirname(__file__), 'picell.log')
    # BUG FIX: int(request.args.get(...)) sat outside the try block, so a
    # malformed ?lines= raised an unhandled ValueError (framework 500).
    # type=int makes bad input fall back to the default instead.
    lines = request.args.get('lines', 100, type=int)
    try:
        if not os.path.exists(log_file):
            return jsonify({"error": "Log file not found."}), 404
        with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
            all_lines = f.readlines()
        tail_lines = all_lines[-lines:] if lines > 0 else all_lines
        return jsonify({"log": ''.join(tail_lines)})
    except Exception as e:
        logger.error(f"Error reading log file: {e}")
        return jsonify({"error": str(e)}), 500
|
|
|
|
@app.route('/api/containers', methods=['GET'])
def list_containers():
    """List all containers (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    try:
        return jsonify(container_manager.list_containers())
    except Exception as exc:
        logger.error(f"Error listing containers: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/containers/<name>/start', methods=['POST'])
def start_container(name):
    """Start the named container (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    try:
        started = container_manager.start_container(name)
        return jsonify({'started': started})
    except Exception as exc:
        logger.error(f"Error starting container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/containers/<name>/stop', methods=['POST'])
def stop_container(name):
    """Stop the named container (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    try:
        stopped = container_manager.stop_container(name)
        return jsonify({'stopped': stopped})
    except Exception as exc:
        logger.error(f"Error stopping container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/containers/<name>/restart', methods=['POST'])
def restart_container(name):
    """Restart the named container (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    try:
        restarted = container_manager.restart_container(name)
        return jsonify({'restarted': restarted})
    except Exception as exc:
        logger.error(f"Error restarting container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/containers/<name>/logs', methods=['GET'])
def get_container_logs(name):
    """Return the last `tail` log lines of a container (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    tail = request.args.get('tail', default=100, type=int)
    try:
        log_text = container_manager.get_container_logs(name, tail=tail)
        return jsonify({'logs': log_text})
    except Exception as exc:
        logger.error(f"Error getting logs for container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/containers/<name>/stats', methods=['GET'])
def get_container_stats(name):
    """Return runtime stats for a container (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    try:
        return jsonify(container_manager.get_container_stats(name))
    except Exception as exc:
        logger.error(f"Error getting stats for container {name}: {exc}")
        return jsonify({'error': str(exc)}), 500
|
|
|
|
@app.route('/api/vault/secrets', methods=['GET'])
def list_secrets():
    """List stored secret names (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    return jsonify({'secrets': app.vault_manager.list_secrets()})
|
|
|
|
@app.route('/api/vault/secrets', methods=['POST'])
def store_secret():
    """Store a secret in the vault (local requests only).

    Required JSON fields: name, value.
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    payload = request.get_json(silent=True)
    if not payload or 'name' not in payload or 'value' not in payload:
        return jsonify({'error': 'Missing name or value'}), 400
    app.vault_manager.store_secret(payload['name'], payload['value'])
    return jsonify({'stored': True})
|
|
|
|
@app.route('/api/vault/secrets/<name>', methods=['GET'])
def get_secret(name):
    """Fetch a secret value by name (local requests only); 404 if absent."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    secret_value = app.vault_manager.get_secret(name)
    if secret_value is None:
        return jsonify({'error': 'Not found'}), 404
    return jsonify({'name': name, 'value': secret_value})
|
|
|
|
@app.route('/api/vault/secrets/<name>', methods=['DELETE'])
def delete_secret(name):
    """Delete a secret by name (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    was_deleted = app.vault_manager.delete_secret(name)
    return jsonify({'deleted': was_deleted})
|
|
|
|
# Enhance container creation to support secrets
|
|
@app.route('/api/containers', methods=['POST'])
def create_container():
    """Create a container (local requests only).

    JSON fields: image (required), name, env, secrets (list of vault
    secret names whose values are injected into env), volumes, command,
    ports. Returns the container manager's result, or a JSON error.
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    data = request.get_json(silent=True)
    if not data or 'image' not in data:
        return jsonify({'error': 'Missing image parameter'}), 400
    name = data.get('name', '')
    env = data.get('env', {})
    # Resolve requested vault secrets into environment variables; unknown
    # secret names are silently skipped (best-effort, matching vault API).
    for secret_name in data.get('secrets', []):
        secret_value = app.vault_manager.get_secret(secret_name)
        if secret_value is not None:
            env[secret_name] = secret_value
    try:
        result = container_manager.create_container(
            image=data['image'],
            name=name,
            env=env,
            volumes=data.get('volumes', {}),
            command=data.get('command', ''),
            ports=data.get('ports', {}),
        )
    except Exception as e:
        # Every sibling handler returns JSON errors; this one previously had
        # no try/except and leaked framework HTML 500 pages on failure.
        logger.error(f"Error creating container: {e}")
        return jsonify({'error': str(e)}), 500
    if 'error' in result:
        return jsonify(result), 500
    return jsonify(result)
|
|
|
|
@app.route('/api/containers/<name>', methods=['DELETE'])
def remove_container(name):
    """Remove a container (local requests only).

    Query params:
        force: '1'/'true'/'yes'/'on' to force removal (default off).
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    # BUG FIX: request.args.get(..., type=bool) applies bool() to the raw
    # string, so any non-empty value — including 'false' and '0' — parsed
    # as True. Parse the flag against an explicit truthy set instead.
    force = request.args.get('force', '').strip().lower() in ('1', 'true', 'yes', 'on')
    success = container_manager.remove_container(name, force=force)
    return jsonify({'removed': success})
|
|
|
|
@app.route('/api/images', methods=['GET'])
def list_images():
    """List available images (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    return jsonify(container_manager.list_images())
|
|
|
|
@app.route('/api/images/pull', methods=['POST'])
def pull_image():
    """Pull an image by name (local requests only).

    Required JSON field: image.
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    payload = request.get_json(silent=True)
    if not payload or 'image' not in payload:
        return jsonify({'error': 'Missing image parameter'}), 400
    outcome = container_manager.pull_image(payload['image'])
    status_code = 500 if 'error' in outcome else 200
    return jsonify(outcome), status_code
|
|
|
|
@app.route('/api/images/<image>', methods=['DELETE'])
def remove_image(image):
    """Remove an image (local requests only).

    Query params:
        force: '1'/'true'/'yes'/'on' to force removal (default off).
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    # BUG FIX: type=bool applies bool() to the raw string, so 'false'/'0'
    # parsed as True. Parse against an explicit truthy set instead.
    force = request.args.get('force', '').strip().lower() in ('1', 'true', 'yes', 'on')
    success = container_manager.remove_image(image, force=force)
    return jsonify({'removed': success})
|
|
|
|
@app.route('/api/volumes', methods=['GET'])
def list_volumes():
    """List volumes (local requests only)."""
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    return jsonify(container_manager.list_volumes())
|
|
|
|
@app.route('/api/volumes', methods=['POST'])
def create_volume():
    """Create a named volume (local requests only).

    Required JSON field: name.
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    payload = request.get_json(silent=True)
    if not payload or 'name' not in payload:
        return jsonify({'error': 'Missing name parameter'}), 400
    outcome = container_manager.create_volume(payload['name'])
    status_code = 500 if 'error' in outcome else 200
    return jsonify(outcome), status_code
|
|
|
|
@app.route('/api/volumes/<name>', methods=['DELETE'])
def remove_volume(name):
    """Remove a volume (local requests only).

    Query params:
        force: '1'/'true'/'yes'/'on' to force removal (default off).
    """
    if not is_local_request():
        return jsonify({'error': 'Access denied'}), 403
    # BUG FIX: type=bool applies bool() to the raw string, so 'false'/'0'
    # parsed as True. Parse against an explicit truthy set instead.
    force = request.args.get('force', '').strip().lower() in ('1', 'true', 'yes', 'on')
    success = container_manager.remove_volume(name, force=force)
    return jsonify({'removed': success})
|
|
|
|
|
|
|
|
# ── Peer-scoped routes (/api/peer/*) ─────────────────────────────────────────
|
|
# These routes are accessible to peer-role sessions only (enforced by
|
|
# the enforce_auth before_request hook above).
|
|
|
|
@app.route('/api/peer/dashboard', methods=['GET'])
def peer_dashboard():
    """Return dashboard info for the authenticated peer including live WireGuard stats."""
    peer_name = session.get('peer_name')
    peer = peer_registry.get_peer(peer_name) if peer_name else None
    if not peer:
        return jsonify({'error': 'Peer not found'}), 404

    # Best-effort live WireGuard stats; keep neutral defaults on any error.
    stats = {'online': None, 'transfer_rx': 0, 'transfer_tx': 0, 'last_handshake': None}
    pub_key = peer.get('public_key')
    if pub_key:
        try:
            stats = wireguard_manager.get_peer_status(pub_key)
        except Exception:
            pass

    peer_ip = peer.get('ip', '')
    default_allowed = f"{peer_ip.split('/')[0]}/32" if peer_ip else ''

    return jsonify({
        'peer_name': peer_name,
        'ip': peer_ip,
        'service_access': peer.get('service_access', []),
        'online': stats.get('online'),
        'rx_bytes': stats.get('transfer_rx', 0),
        'tx_bytes': stats.get('transfer_tx', 0),
        'last_handshake': stats.get('last_handshake'),
        'allowed_ips': peer.get('allowed_ips', default_allowed),
    })
|
|
|
|
|
|
@app.route('/api/peer/services', methods=['GET'])
def peer_services():
    """Return service credentials and access info for the authenticated peer."""
    peer_name = session.get('peer_name')
    peer = peer_registry.get_peer(peer_name) if peer_name else None
    if not peer:
        return jsonify({'error': 'Peer not found'}), 404

    domain = _configured_domain()
    peer_ip = peer.get('ip', '')

    # Best-effort lookup of the server key and WireGuard port; keep the
    # defaults when either lookup fails.
    server_public_key = ''
    wg_port = 51820
    try:
        server_public_key = wireguard_manager.get_keys().get('public_key', '')
        wg_port = config_manager.configs.get('_identity', {}).get('wireguard_port', 51820)
    except Exception:
        pass

    wireguard_info = {
        'ip': peer_ip,
        'server_public_key': server_public_key,
        'endpoint_port': wg_port,
        'dns': '10.0.0.1',
    }
    email_info = {
        'username': f'{peer_name}@{domain}',
        'imap_host': f'mail.{domain}',
        'smtp_host': f'mail.{domain}',
        'imap_port': 993,
        'smtp_port': 587,
    }
    caldav_info = {
        'url': f'http://radicale.{domain}:5232',
        'username': peer_name,
    }
    webdav_info = {
        'url': f'http://webdav.{domain}',
        'username': peer_name,
    }
    return jsonify({
        'wireguard': wireguard_info,
        'email': email_info,
        'caldav': caldav_info,
        'webdav': webdav_info,
    })
|
|
|
|
|
|
if __name__ == '__main__':
    # Flask debug mode is opt-in via the FLASK_DEBUG=1 environment variable.
    debug_mode = os.environ.get('FLASK_DEBUG', '0') == '1'
    app.run(host='0.0.0.0', port=3000, debug=debug_mode)