A5: Extract config routes into blueprint (app.py 1294 → 579 lines)

Move all /api/config/* routes and pending-restart helpers into
routes/config.py. Re-export helpers from app.py for backward compat:

  from routes.config import _set_pending_restart, _clear_pending_restart,
                           _collect_service_ports, _dedup_changes

Test patches updated:
  app._set_pending_restart     → routes.config._set_pending_restart
  app._clear_pending_restart   → routes.config._clear_pending_restart
  app.threading.Thread         → routes.config.threading.Thread

Remaining in app.py: Flask setup, middleware, health monitor thread,
/health, /api/status, /api/health/history* (use module-level state).

1021 tests passing.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-05-01 06:53:24 -04:00
parent 09138fbc18
commit 5d0238ff3c
3 changed files with 687 additions and 730 deletions
+8 -724
View File
@@ -327,6 +327,7 @@ from routes.vault import bp as _vault_bp
from routes.containers import bp as _containers_bp
from routes.services import bp as _services_bp
from routes.peer_dashboard import bp as _peer_dashboard_bp
from routes.config import bp as _config_bp
app.register_blueprint(_email_bp)
app.register_blueprint(_calendar_bp)
app.register_blueprint(_files_bp)
@@ -339,6 +340,13 @@ app.register_blueprint(_vault_bp)
app.register_blueprint(_containers_bp)
app.register_blueprint(_services_bp)
app.register_blueprint(_peer_dashboard_bp)
app.register_blueprint(_config_bp)
# Re-export config helpers so existing test imports/patches keep working
from routes.config import (
_set_pending_restart, _clear_pending_restart,
_collect_service_ports, _dedup_changes,
)
# Unified health monitoring
HEALTH_HISTORY_SIZE = 100
@@ -553,730 +561,6 @@ def get_cell_status():
logger.error(f"Error getting cell status: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config', methods=['GET'])
def get_config():
"""Get cell configuration."""
try:
service_configs = config_manager.get_all_configs()
identity = service_configs.pop('_identity', {})
config = {
'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
}
# Expose computed per-service IPs so the frontend doesn't need to derive them
import ip_utils as _ip_utils_cfg
_ips = _ip_utils_cfg.get_service_ips(config['ip_range'])
config['service_ips'] = {
'dns': _ips['dns'],
'vip_mail': _ips['vip_mail'],
'vip_calendar': _ips['vip_calendar'],
'vip_files': _ips['vip_files'],
'vip_webdav': _ips['vip_webdav'],
}
config['service_configs'] = service_configs
return jsonify(config)
except Exception as e:
logger.error(f"Error getting config: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config', methods=['PUT'])
def update_config():
"""Update cell configuration."""
try:
data = request.get_json(silent=True)
if data is None:
return jsonify({"error": "No data provided"}), 400
# Handle identity fields (cell_name, domain, ip_range, wireguard_port)
identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
identity_updates = {k: v for k, v in data.items() if k in identity_keys}
# Validate cell_name and domain — block injection characters while
# allowing the full range of valid hostname/domain characters.
import re as _re_cfg
# cell_name: hostname component — letters, digits, hyphens only (no dots)
_CELL_NAME_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$')
# domain: may include dots for multi-label names (e.g. home.lan)
_DOMAIN_RE = _re_cfg.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$')
if 'cell_name' in identity_updates:
v = str(identity_updates['cell_name'])
if not v:
return jsonify({'error': 'cell_name cannot be empty'}), 400
if len(v) > 255:
return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
if not _CELL_NAME_RE.match(v):
return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400
if 'domain' in identity_updates:
v = str(identity_updates['domain'])
if not v:
return jsonify({'error': 'domain cannot be empty'}), 400
if len(v) > 255:
return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
if not _DOMAIN_RE.match(v):
return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400
# Validate ip_range — must be a valid CIDR within an RFC-1918 range
if 'ip_range' in identity_updates:
import ipaddress as _ipa
_rfc1918 = [
_ipa.ip_network('10.0.0.0/8'),
_ipa.ip_network('172.16.0.0/12'),
_ipa.ip_network('192.168.0.0/16'),
]
try:
_raw = str(identity_updates['ip_range'])
if '/' not in _raw:
return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
_net = _ipa.ip_network(_raw, strict=False)
if not any(_net.subnet_of(r) for r in _rfc1918):
return jsonify({'error': (
'ip_range must be within an RFC-1918 private range '
'(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
)}), 400
except ValueError as _e:
return jsonify({'error': f'Invalid ip_range: {_e}'}), 400
# Validate service config port and IP fields
_port_fields = {
'network': ['dns_port'],
'wireguard': ['port'],
'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
'calendar': ['port'],
'files': ['port', 'manager_port'],
}
for _svc, _fields in _port_fields.items():
if _svc not in data:
continue
_svc_data = data[_svc]
if not isinstance(_svc_data, dict):
continue
for _f in _fields:
if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
try:
_p = int(_svc_data[_f])
if not (1 <= _p <= 65535):
raise ValueError()
except (ValueError, TypeError):
return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400
# Validate that no two service sections use the same port number
_conflicts = detect_conflicts(config_manager.configs, data)
if _conflicts:
_msgs = []
for _c in _conflicts:
_pairs = ', '.join(f"{_s}.{_f}" for _s, _f in _c['conflicts'])
_msgs.append(f"port {_c['port']} is used by {_pairs}")
return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409
# Validate WireGuard address (must be valid IP/CIDR)
if 'wireguard' in data and isinstance(data['wireguard'], dict):
_addr = data['wireguard'].get('address')
if _addr:
import ipaddress as _ipa2
if '/' not in str(_addr):
return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
try:
_ipa2.ip_interface(_addr)
except ValueError as _e:
return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400
# Capture old identity and service configs BEFORE saving, for change detection + revert
import copy as _copy
old_identity = dict(config_manager.configs.get('_identity', {}))
old_svc_configs = {
svc: dict(config_manager.configs.get(svc, {}))
for svc in data if svc in config_manager.service_schemas
}
# Full pre-change snapshot — used by Discard to revert to original state.
# Must be captured here, before any config writes, so it holds the true old values.
_pre_change_snapshot = {k: _copy.deepcopy(v) for k, v in config_manager.configs.items()
if not k.startswith('_')}
_pre_change_snapshot['_identity'] = _copy.deepcopy(config_manager.configs.get('_identity', {}))
if identity_updates:
stored = config_manager.configs.get('_identity', {})
stored.update(identity_updates)
config_manager.configs['_identity'] = stored
config_manager._save_all_configs()
# Map service names to their manager instances
_svc_managers = {
'network': network_manager,
'wireguard': wireguard_manager,
'email': email_manager,
'calendar': calendar_manager,
'files': file_manager,
'routing': routing_manager,
'vault': app.vault_manager,
}
all_restarted = []
all_warnings = []
# Update service configurations: persist + apply to real config files
for service, config in data.items():
if service in config_manager.service_schemas:
config_manager.update_service_config(service, config)
mgr = _svc_managers.get(service)
if mgr:
mgr.update_config(config)
result = mgr.apply_config(config)
all_restarted.extend(result.get('restarted', []))
all_warnings.extend(result.get('warnings', []))
service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
'service': service,
'config': config
})
# VPN port or subnet change → all peer client configs are stale
if service == 'wireguard' and ('port' in config or 'address' in config):
for p in peer_registry.list_peers():
peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
n = len(peer_registry.list_peers())
if n:
all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
# Keep identity.wireguard_port in sync with service config port
if 'port' in config:
_id = config_manager.configs.get('_identity', {})
_id['wireguard_port'] = config['port']
config_manager.configs['_identity'] = _id
config_manager._save_all_configs()
# Apply cell identity domain to network and email services (write files, defer reload)
if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
domain = identity_updates['domain']
net_result = network_manager.apply_domain(domain, reload=False)
all_warnings.extend(net_result.get('warnings', []))
# Regenerate Caddyfile — virtual host names change with the domain
import ip_utils as _ip_domain
_cur_id = config_manager.configs.get('_identity', {})
_cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
_ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config-caddy/Caddyfile')
_set_pending_restart(
[f'domain changed to {domain}'],
['dns', 'caddy'],
pre_change_snapshot=_pre_change_snapshot,
)
# Apply cell name change to DNS hostname record (write files, defer reload)
if identity_updates.get('cell_name'):
old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
new_name = identity_updates['cell_name']
if old_name != new_name:
cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
all_warnings.extend(cn_result.get('warnings', []))
# Regenerate Caddyfile — main virtual host name changes with cell_name
import ip_utils as _ip_name
_cur_id2 = config_manager.configs.get('_identity', {})
_cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
_ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config-caddy/Caddyfile')
_set_pending_restart(
[f'cell_name changed to {new_name}'],
['dns'],
pre_change_snapshot=_pre_change_snapshot,
)
# Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules
if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
import ip_utils
new_range = identity_updates['ip_range']
cur_identity = config_manager.configs.get('_identity', {})
cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
# Update DNS zone records immediately
ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
all_restarted.extend(ip_result.get('restarted', []))
all_warnings.extend(ip_result.get('warnings', []))
# Update firewall virtual IPs (iptables) and Caddy virtual IPs immediately
firewall_manager.update_service_ips(new_range)
firewall_manager.ensure_caddy_virtual_ips()
# Write new .env with updated IPs (and current ports) for next container start
env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
# Regenerate Caddyfile with new VIPs
ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain,
'/app/config-caddy/Caddyfile')
# Mark ALL containers as needing restart; network_recreate signals that
# docker compose down is required before up (Docker can't change subnet in-place)
_set_pending_restart(
[f'ip_range changed to {new_range} — network will be recreated'],
['*'], network_recreate=True,
pre_change_snapshot=_pre_change_snapshot,
)
# Detect port changes across service configs and identity
# Maps (service_key, field_name) → (port_env_key, [containers])
_PORT_CHANGE_MAP = {
('network', 'dns_port'): ('dns_port', ['dns']),
('wireguard','port'): ('wg_port', ['wireguard']),
('email', 'smtp_port'): ('mail_smtp_port', ['mail']),
('email', 'submission_port'): ('mail_submission_port', ['mail']),
('email', 'imap_port'): ('mail_imap_port', ['mail']),
('email', 'webmail_port'): ('rainloop_port', ['rainloop']),
('calendar', 'port'): ('radicale_port', ['radicale']),
('files', 'port'): ('webdav_port', ['webdav']),
('files', 'manager_port'): ('filegator_port', ['filegator']),
}
port_changed_containers = set()
port_change_messages = []
import ip_utils as _ip_utils_pcd
for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
if svc_key in data and field in data[svc_key]:
default_val = _ip_utils_pcd.PORT_DEFAULTS.get(_env_key)
old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
new_val = data[svc_key][field]
if old_val != new_val:
port_changed_containers.update(containers)
port_change_messages.append(
f'{svc_key} {field}: {old_val}{new_val}'
)
# wireguard_port in identity also drives WG_PORT env var; sync to service config
if 'wireguard_port' in identity_updates:
old_wg = old_identity.get('wireguard_port', _ip_utils_pcd.PORT_DEFAULTS.get('wg_port', 51820))
new_wg = identity_updates['wireguard_port']
if old_wg != new_wg:
# Sync to wireguard service config and update wg0.conf
_wg_svc = config_manager.configs.get('wireguard', {})
_wg_svc['port'] = new_wg
config_manager.update_service_config('wireguard', _wg_svc)
wireguard_manager.apply_config({'port': new_wg})
port_changed_containers.add('wireguard')
port_change_messages.append(f'wireguard_port: {old_wg}{new_wg}')
if port_changed_containers:
import ip_utils as _ip_utils_ports
_env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
_ip_range = config_manager.configs.get('_identity', {}).get(
'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
)
_ip_utils_ports.write_env_file(
_ip_range, _env_file, _collect_service_ports(config_manager.configs)
)
_set_pending_restart(port_change_messages, list(port_changed_containers),
pre_change_snapshot=_pre_change_snapshot)
logger.info(f"Updated config, restarted: {all_restarted}")
return jsonify({
"message": "Configuration updated and applied",
"restarted": all_restarted,
"warnings": all_warnings,
})
except Exception as e:
logger.error(f"Error updating config: {e}")
return jsonify({"error": str(e)}), 500
# ---------------------------------------------------------------------------
# Pending-restart helpers
# ---------------------------------------------------------------------------
def _collect_service_ports(configs: dict) -> dict:
"""Extract current port values from service configs for .env generation."""
ports = {}
net = configs.get('network', {})
wg = configs.get('wireguard', {})
email = configs.get('email', {})
cal = configs.get('calendar', {})
files = configs.get('files', {})
identity = configs.get('_identity', {})
if 'dns_port' in net: ports['dns_port'] = net['dns_port']
if 'port' in wg: ports['wg_port'] = wg['port']
elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port']
if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port']
if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port']
if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port']
if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port']
if 'port' in cal: ports['radicale_port'] = cal['port']
if 'port' in files: ports['webdav_port'] = files['port']
if 'manager_port' in files: ports['filegator_port'] = files['manager_port']
return ports
def _dedup_changes(existing: list, new: list) -> list:
"""Merge change lists, keeping only the latest entry per config key."""
def key_of(msg: str) -> str:
# "ip_range changed to X" → "ip_range"
if ' changed' in msg:
return msg.split(' changed')[0].strip()
# "network dns_port: 52 → 53" → "network dns_port"
if ':' in msg:
return msg.split(':')[0].strip()
return msg
merged = {key_of(c): c for c in existing}
merged.update({key_of(c): c for c in new})
return list(merged.values())
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
pre_change_snapshot: dict = None):
"""Record that specific containers need to be restarted to apply configuration.
containers: list of docker-compose service names, or None/'*' to restart all.
network_recreate: True when the Docker bridge subnet changed (requires down+up).
pre_change_snapshot: full config captured BEFORE this save (for Discard to revert).
Merges with any existing pending state so multiple changes accumulate.
"""
from datetime import datetime as _dt
existing = config_manager.configs.get('_pending_restart', {})
existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []
# Keep the oldest snapshot (the true pre-change state). Never overwrite it with a
# later snapshot — subsequent changes while pending should still revert to origin.
if not existing.get('needs_restart'):
snapshot = pre_change_snapshot or {}
else:
snapshot = existing.get('_snapshot', {})
if containers is None or '*' in (containers or []) or existing_containers == ['*']:
new_containers = ['*']
else:
new_containers = list(set(existing_containers) | set(containers))
config_manager.configs['_pending_restart'] = {
'needs_restart': True,
'changed_at': _dt.utcnow().isoformat(),
'changes': _dedup_changes(existing_changes, changes),
'containers': new_containers,
'network_recreate': network_recreate or existing.get('network_recreate', False),
'_snapshot': snapshot,
}
config_manager._save_all_configs()
def _clear_pending_restart():
config_manager.configs['_pending_restart'] = {
'needs_restart': False, 'changes': [], 'containers': [], 'network_recreate': False
}
config_manager._save_all_configs()
@app.route('/api/config/pending', methods=['GET'])
def get_pending_config():
"""Return whether there are unapplied configuration changes that require a restart."""
pending = config_manager.configs.get('_pending_restart', {})
return jsonify({
'needs_restart': pending.get('needs_restart', False),
'applying': pending.get('applying', False),
'changed_at': pending.get('changed_at'),
'changes': pending.get('changes', []),
'containers': pending.get('containers', ['*']),
})
@app.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
"""Discard pending configuration changes and restore config to pre-change snapshot."""
pending = config_manager.configs.get('_pending_restart', {})
snapshot = pending.get('_snapshot', {})
if snapshot:
# Capture current (changed) identity before reverting, to rewrite config files
cur_identity = dict(config_manager.configs.get('_identity', {}))
old_identity = snapshot.get('_identity', {})
# Restore config values from snapshot
for k, v in snapshot.items():
config_manager.configs[k] = v
# Rewrite DNS/Caddy config files back to old values so they match restored config
import ip_utils as _ip_revert
_id = config_manager.configs.get('_identity', {})
_range = _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cell = _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
_dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
cur_domain = cur_identity.get('domain', '')
old_domain = old_identity.get('domain', '')
if cur_domain and old_domain and cur_domain != old_domain:
network_manager.apply_domain(old_domain, reload=False)
cur_cell_name = cur_identity.get('cell_name', '')
old_cell_name = old_identity.get('cell_name', '')
if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)
_ip_revert.write_caddyfile(_range, _cell, _dom, '/app/config-caddy/Caddyfile')
_clear_pending_restart()
return jsonify({'message': 'Pending changes discarded'})
@app.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
"""Apply pending configuration by restarting containers via docker compose up -d."""
try:
pending = config_manager.configs.get('_pending_restart', {})
if not pending.get('needs_restart'):
return jsonify({'message': 'No pending changes to apply'})
# Get project working dir, image name, and data-dir host path from our container labels/mounts
project_dir = '/home/roof/pic'
api_image = 'pic_api:latest' # fallback (docker-compose v1 naming)
data_host_path = '/home/roof/pic/data/api' # fallback
try:
import docker as _docker_sdk
_client = _docker_sdk.from_env()
_self = _client.containers.get('cell-api')
project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
# Use the actual image tag so the helper works regardless of compose version
# (docker-compose v1 builds pic_api:latest, compose v2+ builds pic-api:latest)
tags = _self.image.tags
if tags:
api_image = tags[0]
# Find the host-side path for /app/data so the helper can clear the pending flag
for _m in _self.attrs.get('Mounts', []):
if _m.get('Destination') == '/app/data':
data_host_path = _m.get('Source', data_host_path)
break
except Exception:
pass
containers = pending.get('containers', ['*'])
# Check if the IP range (network subnet) is changing — Docker cannot modify an
# existing network's subnet in-place, so we need `down` + `up` in that case.
needs_network_recreate = pending.get('network_recreate', False)
host_env = os.path.join(project_dir, '.env')
host_compose = os.path.join(project_dir, 'docker-compose.yml')
if '*' in containers:
# All-services restart: `docker compose down` or `up -d` may stop/recreate the
# API container itself, killing this background thread mid-operation.
# Spawn an independent helper container (same image as cell-api) that has docker
# CLI and survives cell-api being stopped/recreated.
#
# Mark as "applying" rather than clearing early. The helper clears the flag on
# success by writing to cell_config.json directly (via the /app/data mount).
# If the helper fails, needs_restart stays True so the UI continues showing
# pending changes after the API restarts. On the next startup, if "applying"
# is still set, _recover_pending_apply() resets it so the user can retry.
config_manager.configs['_pending_restart']['applying'] = True
config_manager._save_all_configs()
# Encode the clear script in base64 to avoid shell-quoting issues.
import base64 as _b64
_clear_py = (
"import json,os; p='/app/data/cell_config.json';"
"f=open(p); d=json.load(f); f.close();"
"d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};"
"tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)"
)
_b64_cmd = _b64.b64encode(_clear_py.encode()).decode()
clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\""
if needs_network_recreate:
helper_script = (
f'sleep 2'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} down'
f' && {clear_flag_cmd}'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} up -d'
)
else:
helper_script = (
f'sleep 2'
f' && {clear_flag_cmd}'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} up -d'
)
def _do_apply():
import subprocess as _subprocess
_subprocess.Popen(
['docker', 'run', '--rm',
'-v', '/var/run/docker.sock:/var/run/docker.sock',
'-v', f'{project_dir}:{project_dir}',
'-v', f'{data_host_path}:/app/data',
'--entrypoint', 'sh',
api_image,
'-c', helper_script],
close_fds=True,
stdout=_subprocess.DEVNULL,
stderr=_subprocess.DEVNULL,
)
logger.info(
'spawned helper container for all-services restart'
+ (' (network_recreate)' if needs_network_recreate else '')
)
else:
# Specific containers only — API is not affected, run directly from here.
# Only clear the pending flag after the subprocess exits with code 0 so that
# if the compose command fails the UI still shows changes as pending.
def _do_apply():
import time as _time
import subprocess as _subprocess
_time.sleep(0.3)
result = _subprocess.run(
['docker', 'compose',
'--project-directory', project_dir,
'-f', '/app/docker-compose.yml',
'--env-file', '/app/.env.compose',
'up', '-d', '--no-deps', '--force-recreate'] + containers,
capture_output=True, text=True, timeout=120,
)
if result.returncode != 0:
logger.error(f"docker compose up failed: {result.stderr.strip()}")
else:
logger.info(f'docker compose up completed for: {containers}')
_clear_pending_restart()
threading.Thread(target=_do_apply, daemon=False).start()
return jsonify({
'message': 'Applying configuration — containers are restarting',
'restart_in_progress': True,
})
except Exception as e:
logger.error(f"Error applying config: {e}")
return jsonify({'error': str(e)}), 500
# Configuration management endpoints
@app.route('/api/config/backup', methods=['POST'])
def create_config_backup():
"""Create configuration backup."""
try:
backup_id = config_manager.backup_config()
service_bus.publish_event(EventType.BACKUP_CREATED, 'api', {
'backup_id': backup_id,
'timestamp': datetime.utcnow().isoformat()
})
return jsonify({"backup_id": backup_id})
except Exception as e:
logger.error(f"Error creating backup: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config/backups', methods=['GET'])
def list_config_backups():
"""List available backups."""
try:
backups = config_manager.list_backups()
return jsonify(backups)
except Exception as e:
logger.error(f"Error listing backups: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
"""Restore configuration from backup. Body may contain {services: [...]} for selective restore."""
try:
data = request.get_json(silent=True) or {}
services = data.get('services') # None = full restore
success = config_manager.restore_config(backup_id, services=services)
if success:
service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
'backup_id': backup_id,
'timestamp': datetime.utcnow().isoformat()
})
return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
else:
return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
except Exception as e:
logger.error(f"Error restoring backup: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config/export', methods=['GET'])
def export_config():
"""Export configuration."""
try:
format = request.args.get('format', 'json')
config_data = config_manager.export_config(format)
return jsonify({"config": config_data, "format": format})
except Exception as e:
logger.error(f"Error exporting config: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config/import', methods=['POST'])
def import_config():
"""Import configuration."""
try:
data = request.get_json(silent=True)
if data is None:
return jsonify({"error": "No data provided"}), 400
config_data = data.get('config')
format = data.get('format', 'json')
success = config_manager.import_config(config_data, format)
if success:
return jsonify({"message": "Configuration imported successfully"})
else:
return jsonify({"error": "Failed to import configuration"}), 500
except Exception as e:
logger.error(f"Error importing config: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
"""Download a backup as a zip file."""
try:
from pathlib import Path
backup_path = config_manager.backup_dir / backup_id
if not backup_path.exists():
return jsonify({'error': f'Backup {backup_id} not found'}), 404
buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
for f in backup_path.rglob('*'):
if f.is_file():
zf.write(f, f.relative_to(backup_path))
buf.seek(0)
return send_file(buf, mimetype='application/zip',
as_attachment=True,
download_name=f'{backup_id}.zip')
except Exception as e:
logger.error(f"Error downloading backup: {e}")
return jsonify({'error': str(e)}), 500
@app.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
"""Upload a backup zip file."""
try:
if 'file' not in request.files:
return jsonify({'error': 'No file provided'}), 400
f = request.files['file']
filename = f.filename or ''
if filename.endswith('.zip'):
backup_id = filename[:-4]
else:
backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
backup_path = config_manager.backup_dir / backup_id
backup_path.mkdir(parents=True, exist_ok=True)
try:
with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
zf.extractall(backup_path)
except zipfile.BadZipFile:
shutil.rmtree(backup_path, ignore_errors=True)
return jsonify({'error': 'Invalid zip file'}), 400
if not (backup_path / 'manifest.json').exists():
shutil.rmtree(backup_path, ignore_errors=True)
return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
return jsonify({'backup_id': backup_id})
except Exception as e:
logger.error(f"Error uploading backup: {e}")
return jsonify({'error': str(e)}), 500
@app.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
"""Delete a configuration backup."""
try:
success = config_manager.delete_backup(backup_id)
if success:
return jsonify({"message": f"Backup {backup_id} deleted"})
else:
return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
except Exception as e:
logger.error(f"Error deleting backup: {e}")
return jsonify({"error": str(e)}), 500
@app.route('/api/health/history', methods=['GET'])
def get_health_history():
"""Get recent unified health check results."""
+673
View File
@@ -0,0 +1,673 @@
import io
import os
import re
import copy
import json
import ipaddress
import zipfile
import shutil
import logging
import threading
from datetime import datetime
from flask import Blueprint, request, jsonify, send_file, current_app
logger = logging.getLogger('picell')
bp = Blueprint('config', __name__)
# ---------------------------------------------------------------------------
# Pending-restart helpers
# ---------------------------------------------------------------------------
def _collect_service_ports(configs: dict) -> dict:
"""Extract current port values from service configs for .env generation."""
from app import config_manager as _cm
ports = {}
net = configs.get('network', {})
wg = configs.get('wireguard', {})
email = configs.get('email', {})
cal = configs.get('calendar', {})
files = configs.get('files', {})
identity = configs.get('_identity', {})
if 'dns_port' in net: ports['dns_port'] = net['dns_port']
if 'port' in wg: ports['wg_port'] = wg['port']
elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port']
if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port']
if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port']
if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port']
if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port']
if 'port' in cal: ports['radicale_port'] = cal['port']
if 'port' in files: ports['webdav_port'] = files['port']
if 'manager_port' in files: ports['filegator_port'] = files['manager_port']
return ports
def _dedup_changes(existing: list, new: list) -> list:
"""Merge change lists, keeping only the latest entry per config key."""
def key_of(msg: str) -> str:
if ' changed' in msg:
return msg.split(' changed')[0].strip()
if ':' in msg:
return msg.split(':')[0].strip()
return msg
merged = {key_of(c): c for c in existing}
merged.update({key_of(c): c for c in new})
return list(merged.values())
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
pre_change_snapshot: dict = None):
"""Record that specific containers need to be restarted to apply configuration."""
from app import config_manager
existing = config_manager.configs.get('_pending_restart', {})
existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []
if not existing.get('needs_restart'):
snapshot = pre_change_snapshot or {}
else:
snapshot = existing.get('_snapshot', {})
if containers is None or '*' in (containers or []) or existing_containers == ['*']:
new_containers = ['*']
else:
new_containers = list(set(existing_containers) | set(containers))
config_manager.configs['_pending_restart'] = {
'needs_restart': True,
'changed_at': datetime.utcnow().isoformat(),
'changes': _dedup_changes(existing_changes, changes),
'containers': new_containers,
'network_recreate': network_recreate or existing.get('network_recreate', False),
'_snapshot': snapshot,
}
config_manager._save_all_configs()
def _clear_pending_restart():
from app import config_manager
config_manager.configs['_pending_restart'] = {
'needs_restart': False, 'changes': [], 'containers': [], 'network_recreate': False
}
config_manager._save_all_configs()
# ---------------------------------------------------------------------------
# Config routes
# ---------------------------------------------------------------------------
@bp.route('/api/config', methods=['GET'])
def get_config():
try:
from app import config_manager
import ip_utils as _ip_utils_cfg
service_configs = config_manager.get_all_configs()
identity = service_configs.pop('_identity', {})
config = {
'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
}
_ips = _ip_utils_cfg.get_service_ips(config['ip_range'])
config['service_ips'] = {
'dns': _ips['dns'],
'vip_mail': _ips['vip_mail'],
'vip_calendar': _ips['vip_calendar'],
'vip_files': _ips['vip_files'],
'vip_webdav': _ips['vip_webdav'],
}
config['service_configs'] = service_configs
return jsonify(config)
except Exception as e:
logger.error(f"Error getting config: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config', methods=['PUT'])
def update_config():
try:
from app import (config_manager, network_manager, wireguard_manager, email_manager,
calendar_manager, file_manager, routing_manager,
peer_registry, firewall_manager, service_bus, EventType, detect_conflicts)
import ip_utils
data = request.get_json(silent=True)
if data is None:
return jsonify({"error": "No data provided"}), 400
identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
identity_updates = {k: v for k, v in data.items() if k in identity_keys}
_CELL_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$')
_DOMAIN_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$')
if 'cell_name' in identity_updates:
v = str(identity_updates['cell_name'])
if not v:
return jsonify({'error': 'cell_name cannot be empty'}), 400
if len(v) > 255:
return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
if not _CELL_NAME_RE.match(v):
return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400
if 'domain' in identity_updates:
v = str(identity_updates['domain'])
if not v:
return jsonify({'error': 'domain cannot be empty'}), 400
if len(v) > 255:
return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
if not _DOMAIN_RE.match(v):
return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400
if 'ip_range' in identity_updates:
_rfc1918 = [
ipaddress.ip_network('10.0.0.0/8'),
ipaddress.ip_network('172.16.0.0/12'),
ipaddress.ip_network('192.168.0.0/16'),
]
try:
_raw = str(identity_updates['ip_range'])
if '/' not in _raw:
return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
_net = ipaddress.ip_network(_raw, strict=False)
if not any(_net.subnet_of(r) for r in _rfc1918):
return jsonify({'error': (
'ip_range must be within an RFC-1918 private range '
'(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
)}), 400
except ValueError as _e:
return jsonify({'error': f'Invalid ip_range: {_e}'}), 400
_port_fields = {
'network': ['dns_port'],
'wireguard': ['port'],
'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
'calendar': ['port'],
'files': ['port', 'manager_port'],
}
for _svc, _fields in _port_fields.items():
if _svc not in data:
continue
_svc_data = data[_svc]
if not isinstance(_svc_data, dict):
continue
for _f in _fields:
if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
try:
_p = int(_svc_data[_f])
if not (1 <= _p <= 65535):
raise ValueError()
except (ValueError, TypeError):
return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400
_conflicts = detect_conflicts(config_manager.configs, data)
if _conflicts:
_msgs = [
f"port {_c['port']} is used by {', '.join(f'{_s}.{_f}' for _s, _f in _c['conflicts'])}"
for _c in _conflicts
]
return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409
if 'wireguard' in data and isinstance(data['wireguard'], dict):
_addr = data['wireguard'].get('address')
if _addr:
if '/' not in str(_addr):
return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
try:
ipaddress.ip_interface(_addr)
except ValueError as _e:
return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400
old_identity = dict(config_manager.configs.get('_identity', {}))
old_svc_configs = {
svc: dict(config_manager.configs.get(svc, {}))
for svc in data if svc in config_manager.service_schemas
}
_pre_change_snapshot = {k: copy.deepcopy(v) for k, v in config_manager.configs.items()
if not k.startswith('_')}
_pre_change_snapshot['_identity'] = copy.deepcopy(config_manager.configs.get('_identity', {}))
if identity_updates:
stored = config_manager.configs.get('_identity', {})
stored.update(identity_updates)
config_manager.configs['_identity'] = stored
config_manager._save_all_configs()
_svc_managers = {
'network': network_manager,
'wireguard': wireguard_manager,
'email': email_manager,
'calendar': calendar_manager,
'files': file_manager,
'routing': routing_manager,
'vault': current_app.vault_manager,
}
all_restarted = []
all_warnings = []
for service, config in data.items():
if service in config_manager.service_schemas:
config_manager.update_service_config(service, config)
mgr = _svc_managers.get(service)
if mgr:
mgr.update_config(config)
result = mgr.apply_config(config)
all_restarted.extend(result.get('restarted', []))
all_warnings.extend(result.get('warnings', []))
service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
'service': service, 'config': config
})
if service == 'wireguard' and ('port' in config or 'address' in config):
for p in peer_registry.list_peers():
peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
n = len(peer_registry.list_peers())
if n:
all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
if 'port' in config:
_id = config_manager.configs.get('_identity', {})
_id['wireguard_port'] = config['port']
config_manager.configs['_identity'] = _id
config_manager._save_all_configs()
if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
domain = identity_updates['domain']
net_result = network_manager.apply_domain(domain, reload=False)
all_warnings.extend(net_result.get('warnings', []))
_cur_id = config_manager.configs.get('_identity', {})
ip_utils.write_caddyfile(
_cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
_cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
domain, '/app/config-caddy/Caddyfile'
)
_set_pending_restart(
[f'domain changed to {domain}'],
['dns', 'caddy'],
pre_change_snapshot=_pre_change_snapshot,
)
if identity_updates.get('cell_name'):
old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
new_name = identity_updates['cell_name']
if old_name != new_name:
cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
all_warnings.extend(cn_result.get('warnings', []))
_cur_id2 = config_manager.configs.get('_identity', {})
ip_utils.write_caddyfile(
_cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
new_name,
identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
'/app/config-caddy/Caddyfile'
)
_set_pending_restart(
[f'cell_name changed to {new_name}'],
['dns'],
pre_change_snapshot=_pre_change_snapshot,
)
if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
new_range = identity_updates['ip_range']
cur_identity = config_manager.configs.get('_identity', {})
cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
all_restarted.extend(ip_result.get('restarted', []))
all_warnings.extend(ip_result.get('warnings', []))
firewall_manager.update_service_ips(new_range)
firewall_manager.ensure_caddy_virtual_ips()
env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, '/app/config-caddy/Caddyfile')
_set_pending_restart(
[f'ip_range changed to {new_range} — network will be recreated'],
['*'], network_recreate=True,
pre_change_snapshot=_pre_change_snapshot,
)
_PORT_CHANGE_MAP = {
('network', 'dns_port'): ('dns_port', ['dns']),
('wireguard','port'): ('wg_port', ['wireguard']),
('email', 'smtp_port'): ('mail_smtp_port', ['mail']),
('email', 'submission_port'): ('mail_submission_port', ['mail']),
('email', 'imap_port'): ('mail_imap_port', ['mail']),
('email', 'webmail_port'): ('rainloop_port', ['rainloop']),
('calendar', 'port'): ('radicale_port', ['radicale']),
('files', 'port'): ('webdav_port', ['webdav']),
('files', 'manager_port'): ('filegator_port', ['filegator']),
}
port_changed_containers = set()
port_change_messages = []
for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
if svc_key in data and field in data[svc_key]:
default_val = ip_utils.PORT_DEFAULTS.get(_env_key)
old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
new_val = data[svc_key][field]
if old_val != new_val:
port_changed_containers.update(containers)
port_change_messages.append(f'{svc_key} {field}: {old_val}{new_val}')
if 'wireguard_port' in identity_updates:
old_wg = old_identity.get('wireguard_port', ip_utils.PORT_DEFAULTS.get('wg_port', 51820))
new_wg = identity_updates['wireguard_port']
if old_wg != new_wg:
_wg_svc = config_manager.configs.get('wireguard', {})
_wg_svc['port'] = new_wg
config_manager.update_service_config('wireguard', _wg_svc)
wireguard_manager.apply_config({'port': new_wg})
port_changed_containers.add('wireguard')
port_change_messages.append(f'wireguard_port: {old_wg}{new_wg}')
if port_changed_containers:
env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
_ip_range = config_manager.configs.get('_identity', {}).get(
'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
)
ip_utils.write_env_file(_ip_range, env_file, _collect_service_ports(config_manager.configs))
_set_pending_restart(port_change_messages, list(port_changed_containers),
pre_change_snapshot=_pre_change_snapshot)
logger.info(f"Updated config, restarted: {all_restarted}")
return jsonify({
"message": "Configuration updated and applied",
"restarted": all_restarted,
"warnings": all_warnings,
})
except Exception as e:
logger.error(f"Error updating config: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/pending', methods=['GET'])
def get_pending_config():
from app import config_manager
pending = config_manager.configs.get('_pending_restart', {})
return jsonify({
'needs_restart': pending.get('needs_restart', False),
'applying': pending.get('applying', False),
'changed_at': pending.get('changed_at'),
'changes': pending.get('changes', []),
'containers': pending.get('containers', ['*']),
})
@bp.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
from app import config_manager, network_manager
import ip_utils as _ip_revert
pending = config_manager.configs.get('_pending_restart', {})
snapshot = pending.get('_snapshot', {})
if snapshot:
cur_identity = dict(config_manager.configs.get('_identity', {}))
old_identity = snapshot.get('_identity', {})
for k, v in snapshot.items():
config_manager.configs[k] = v
_id = config_manager.configs.get('_identity', {})
_dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
cur_domain = cur_identity.get('domain', '')
old_domain = old_identity.get('domain', '')
if cur_domain and old_domain and cur_domain != old_domain:
network_manager.apply_domain(old_domain, reload=False)
cur_cell_name = cur_identity.get('cell_name', '')
old_cell_name = old_identity.get('cell_name', '')
if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)
_ip_revert.write_caddyfile(
_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
_dom, '/app/config-caddy/Caddyfile'
)
_clear_pending_restart()
return jsonify({'message': 'Pending changes discarded'})
@bp.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
try:
from app import config_manager
pending = config_manager.configs.get('_pending_restart', {})
if not pending.get('needs_restart'):
return jsonify({'message': 'No pending changes to apply'})
project_dir = '/home/roof/pic'
api_image = 'pic_api:latest'
data_host_path = '/home/roof/pic/data/api'
try:
import docker as _docker_sdk
_client = _docker_sdk.from_env()
_self = _client.containers.get('cell-api')
project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
tags = _self.image.tags
if tags:
api_image = tags[0]
for _m in _self.attrs.get('Mounts', []):
if _m.get('Destination') == '/app/data':
data_host_path = _m.get('Source', data_host_path)
break
except Exception:
pass
containers = pending.get('containers', ['*'])
needs_network_recreate = pending.get('network_recreate', False)
host_env = os.path.join(project_dir, '.env')
host_compose = os.path.join(project_dir, 'docker-compose.yml')
if '*' in containers:
config_manager.configs['_pending_restart']['applying'] = True
config_manager._save_all_configs()
import base64 as _b64
_clear_py = (
"import json,os; p='/app/data/cell_config.json';"
"f=open(p); d=json.load(f); f.close();"
"d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};"
"tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)"
)
_b64_cmd = _b64.b64encode(_clear_py.encode()).decode()
clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\""
if needs_network_recreate:
helper_script = (
f'sleep 2'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} down'
f' && {clear_flag_cmd}'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} up -d'
)
else:
helper_script = (
f'sleep 2'
f' && {clear_flag_cmd}'
f' && docker compose --project-directory {project_dir}'
f' -f {host_compose} --env-file {host_env} up -d'
)
def _do_apply():
import subprocess as _subprocess
_subprocess.Popen(
['docker', 'run', '--rm',
'-v', '/var/run/docker.sock:/var/run/docker.sock',
'-v', f'{project_dir}:{project_dir}',
'-v', f'{data_host_path}:/app/data',
'--entrypoint', 'sh',
api_image,
'-c', helper_script],
close_fds=True,
stdout=_subprocess.DEVNULL,
stderr=_subprocess.DEVNULL,
)
logger.info(
'spawned helper container for all-services restart'
+ (' (network_recreate)' if needs_network_recreate else '')
)
else:
def _do_apply():
import time as _time
import subprocess as _subprocess
_time.sleep(0.3)
result = _subprocess.run(
['docker', 'compose',
'--project-directory', project_dir,
'-f', '/app/docker-compose.yml',
'--env-file', '/app/.env.compose',
'up', '-d', '--no-deps', '--force-recreate'] + containers,
capture_output=True, text=True, timeout=120,
)
if result.returncode != 0:
logger.error(f"docker compose up failed: {result.stderr.strip()}")
else:
logger.info(f'docker compose up completed for: {containers}')
_clear_pending_restart()
threading.Thread(target=_do_apply, daemon=False).start()
return jsonify({
'message': 'Applying configuration — containers are restarting',
'restart_in_progress': True,
})
except Exception as e:
logger.error(f"Error applying config: {e}")
return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backup', methods=['POST'])
def create_config_backup():
try:
from app import config_manager, service_bus, EventType
backup_id = config_manager.backup_config()
service_bus.publish_event(EventType.BACKUP_CREATED, 'api', {
'backup_id': backup_id,
'timestamp': datetime.utcnow().isoformat()
})
return jsonify({"backup_id": backup_id})
except Exception as e:
logger.error(f"Error creating backup: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/backups', methods=['GET'])
def list_config_backups():
try:
from app import config_manager
return jsonify(config_manager.list_backups())
except Exception as e:
logger.error(f"Error listing backups: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
try:
from app import config_manager, service_bus, EventType
data = request.get_json(silent=True) or {}
success = config_manager.restore_config(backup_id, services=data.get('services'))
if success:
service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
'backup_id': backup_id,
'timestamp': datetime.utcnow().isoformat()
})
return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
except Exception as e:
logger.error(f"Error restoring backup: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/export', methods=['GET'])
def export_config():
try:
from app import config_manager
format = request.args.get('format', 'json')
config_data = config_manager.export_config(format)
return jsonify({"config": config_data, "format": format})
except Exception as e:
logger.error(f"Error exporting config: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/import', methods=['POST'])
def import_config():
try:
from app import config_manager
data = request.get_json(silent=True)
if data is None:
return jsonify({"error": "No data provided"}), 400
success = config_manager.import_config(data.get('config'), data.get('format', 'json'))
if success:
return jsonify({"message": "Configuration imported successfully"})
return jsonify({"error": "Failed to import configuration"}), 500
except Exception as e:
logger.error(f"Error importing config: {e}")
return jsonify({"error": str(e)}), 500
@bp.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
try:
from app import config_manager
backup_path = config_manager.backup_dir / backup_id
if not backup_path.exists():
return jsonify({'error': f'Backup {backup_id} not found'}), 404
buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
for f in backup_path.rglob('*'):
if f.is_file():
zf.write(f, f.relative_to(backup_path))
buf.seek(0)
return send_file(buf, mimetype='application/zip',
as_attachment=True,
download_name=f'{backup_id}.zip')
except Exception as e:
logger.error(f"Error downloading backup: {e}")
return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
try:
from app import config_manager
if 'file' not in request.files:
return jsonify({'error': 'No file provided'}), 400
f = request.files['file']
filename = f.filename or ''
if filename.endswith('.zip'):
backup_id = filename[:-4]
else:
backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
backup_path = config_manager.backup_dir / backup_id
backup_path.mkdir(parents=True, exist_ok=True)
try:
with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
zf.extractall(backup_path)
except zipfile.BadZipFile:
shutil.rmtree(backup_path, ignore_errors=True)
return jsonify({'error': 'Invalid zip file'}), 400
if not (backup_path / 'manifest.json').exists():
shutil.rmtree(backup_path, ignore_errors=True)
return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
return jsonify({'backup_id': backup_id})
except Exception as e:
logger.error(f"Error uploading backup: {e}")
return jsonify({'error': str(e)}), 500
@bp.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
try:
from app import config_manager
success = config_manager.delete_backup(backup_id)
if success:
return jsonify({"message": f"Backup {backup_id} deleted"})
return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
except Exception as e:
logger.error(f"Error deleting backup: {e}")
return jsonify({"error": str(e)}), 500