A5: Extract config routes into blueprint (app.py 1294 → 579 lines)
Move all /api/config/* routes and pending-restart helpers into
routes/config.py. Re-export helpers from app.py for backward compat:
from routes.config import _set_pending_restart, _clear_pending_restart,
_collect_service_ports, _dedup_changes
Test patches updated:
app._set_pending_restart → routes.config._set_pending_restart
app._clear_pending_restart → routes.config._clear_pending_restart
app.threading.Thread → routes.config.threading.Thread
Remaining in app.py: Flask setup, middleware, health monitor thread,
/health, /api/status, /api/health/history* (use module-level state).
1021 tests passing.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,673 @@
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import copy
|
||||
import json
|
||||
import ipaddress
|
||||
import zipfile
|
||||
import shutil
|
||||
import logging
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from flask import Blueprint, request, jsonify, send_file, current_app
|
||||
logger = logging.getLogger('picell')
|
||||
bp = Blueprint('config', __name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pending-restart helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _collect_service_ports(configs: dict) -> dict:
|
||||
"""Extract current port values from service configs for .env generation."""
|
||||
from app import config_manager as _cm
|
||||
ports = {}
|
||||
net = configs.get('network', {})
|
||||
wg = configs.get('wireguard', {})
|
||||
email = configs.get('email', {})
|
||||
cal = configs.get('calendar', {})
|
||||
files = configs.get('files', {})
|
||||
identity = configs.get('_identity', {})
|
||||
|
||||
if 'dns_port' in net: ports['dns_port'] = net['dns_port']
|
||||
if 'port' in wg: ports['wg_port'] = wg['port']
|
||||
elif 'wireguard_port' in identity: ports['wg_port'] = identity['wireguard_port']
|
||||
if 'smtp_port' in email: ports['mail_smtp_port'] = email['smtp_port']
|
||||
if 'submission_port' in email: ports['mail_submission_port'] = email['submission_port']
|
||||
if 'imap_port' in email: ports['mail_imap_port'] = email['imap_port']
|
||||
if 'webmail_port' in email: ports['rainloop_port'] = email['webmail_port']
|
||||
if 'port' in cal: ports['radicale_port'] = cal['port']
|
||||
if 'port' in files: ports['webdav_port'] = files['port']
|
||||
if 'manager_port' in files: ports['filegator_port'] = files['manager_port']
|
||||
return ports
|
||||
|
||||
|
||||
def _dedup_changes(existing: list, new: list) -> list:
|
||||
"""Merge change lists, keeping only the latest entry per config key."""
|
||||
def key_of(msg: str) -> str:
|
||||
if ' changed' in msg:
|
||||
return msg.split(' changed')[0].strip()
|
||||
if ':' in msg:
|
||||
return msg.split(':')[0].strip()
|
||||
return msg
|
||||
merged = {key_of(c): c for c in existing}
|
||||
merged.update({key_of(c): c for c in new})
|
||||
return list(merged.values())
|
||||
|
||||
|
||||
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
                         pre_change_snapshot: dict = None):
    """Record that specific containers need to be restarted to apply configuration."""
    from app import config_manager
    prior = config_manager.configs.get('_pending_restart', {})
    already_pending = bool(prior.get('needs_restart'))
    prior_changes = prior.get('changes', []) if already_pending else []
    prior_containers = prior.get('containers', []) if already_pending else []

    # Keep the snapshot taken before the *first* pending change so a later
    # cancel can revert everything at once.
    if already_pending:
        snapshot = prior.get('_snapshot', {})
    else:
        snapshot = pre_change_snapshot or {}

    # '*' means "restart everything"; once present it absorbs any later set.
    wildcard = containers is None or '*' in (containers or []) or prior_containers == ['*']
    if wildcard:
        merged_containers = ['*']
    else:
        merged_containers = list(set(prior_containers) | set(containers))

    config_manager.configs['_pending_restart'] = {
        'needs_restart': True,
        'changed_at': datetime.utcnow().isoformat(),
        'changes': _dedup_changes(prior_changes, changes),
        'containers': merged_containers,
        'network_recreate': network_recreate or prior.get('network_recreate', False),
        '_snapshot': snapshot,
    }
    config_manager._save_all_configs()
|
||||
|
||||
|
||||
def _clear_pending_restart():
    """Reset the pending-restart record to an empty (no-op) state and persist it."""
    from app import config_manager
    config_manager.configs['_pending_restart'] = {
        'needs_restart': False,
        'changes': [],
        'containers': [],
        'network_recreate': False,
    }
    config_manager._save_all_configs()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Config routes
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@bp.route('/api/config', methods=['GET'])
def get_config():
    """Return cell identity, derived service IPs, and all service configs."""
    try:
        from app import config_manager
        import ip_utils as _ip_utils_cfg
        all_configs = config_manager.get_all_configs()
        # Identity lives alongside service sections; split it out of the payload.
        identity = all_configs.pop('_identity', {})

        payload = {
            'cell_name': identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            'domain': identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
            'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
            'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
        }
        derived_ips = _ip_utils_cfg.get_service_ips(payload['ip_range'])
        payload['service_ips'] = {
            key: derived_ips[key]
            for key in ('dns', 'vip_mail', 'vip_calendar', 'vip_files', 'vip_webdav')
        }
        payload['service_configs'] = all_configs
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Error getting config: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config', methods=['PUT'])
def update_config():
    """Validate and apply a configuration update (identity + service sections).

    The JSON body may mix identity keys (cell_name, domain, ip_range,
    wireguard_port) with per-service config sections. Changes are validated,
    checked for port conflicts, applied through the matching service manager,
    and — where a container restart is required — recorded via
    _set_pending_restart for a later /api/config/apply.

    Returns 400 on validation failure, 409 on port conflicts, 500 on errors.
    """
    try:
        from app import (config_manager, network_manager, wireguard_manager, email_manager,
                         calendar_manager, file_manager, routing_manager,
                         peer_registry, firewall_manager, service_bus, EventType, detect_conflicts)
        import ip_utils
        data = request.get_json(silent=True)
        if data is None:
            return jsonify({"error": "No data provided"}), 400

        # Split identity keys out of the mixed payload.
        identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'}
        identity_updates = {k: v for k, v in data.items() if k in identity_keys}

        _CELL_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9-]{0,254}$')
        _DOMAIN_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9.-]{0,254}$')

        # --- identity validation -------------------------------------------
        if 'cell_name' in identity_updates:
            v = str(identity_updates['cell_name'])
            if not v:
                return jsonify({'error': 'cell_name cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'cell_name must be 255 characters or fewer'}), 400
            if not _CELL_NAME_RE.match(v):
                return jsonify({'error': 'Invalid cell_name: use only letters, digits, hyphens'}), 400

        if 'domain' in identity_updates:
            v = str(identity_updates['domain'])
            if not v:
                return jsonify({'error': 'domain cannot be empty'}), 400
            if len(v) > 255:
                return jsonify({'error': 'domain must be 255 characters or fewer'}), 400
            if not _DOMAIN_RE.match(v):
                return jsonify({'error': 'Invalid domain: use only letters, digits, hyphens, dots'}), 400

        if 'ip_range' in identity_updates:
            # Only RFC-1918 private ranges are accepted for the cell network.
            _rfc1918 = [
                ipaddress.ip_network('10.0.0.0/8'),
                ipaddress.ip_network('172.16.0.0/12'),
                ipaddress.ip_network('192.168.0.0/16'),
            ]
            try:
                _raw = str(identity_updates['ip_range'])
                if '/' not in _raw:
                    return jsonify({'error': 'ip_range must include a CIDR prefix (e.g. 172.20.0.0/16)'}), 400
                _net = ipaddress.ip_network(_raw, strict=False)
                if not any(_net.subnet_of(r) for r in _rfc1918):
                    return jsonify({'error': (
                        'ip_range must be within an RFC-1918 private range '
                        '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)'
                    )}), 400
            except ValueError as _e:
                return jsonify({'error': f'Invalid ip_range: {_e}'}), 400

        # --- port validation: every port field must be an int in 1..65535 ---
        _port_fields = {
            'network': ['dns_port'],
            'wireguard': ['port'],
            'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'],
            'calendar': ['port'],
            'files': ['port', 'manager_port'],
        }
        for _svc, _fields in _port_fields.items():
            if _svc not in data:
                continue
            _svc_data = data[_svc]
            if not isinstance(_svc_data, dict):
                continue
            for _f in _fields:
                if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '':
                    try:
                        _p = int(_svc_data[_f])
                        if not (1 <= _p <= 65535):
                            raise ValueError()
                    except (ValueError, TypeError):
                        return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400

        # Reject updates that would make two services share a port.
        _conflicts = detect_conflicts(config_manager.configs, data)
        if _conflicts:
            _msgs = [
                f"port {_c['port']} is used by {', '.join(f'{_s}.{_f}' for _s, _f in _c['conflicts'])}"
                for _c in _conflicts
            ]
            return jsonify({'error': 'Port conflict: ' + '; '.join(_msgs)}), 409

        if 'wireguard' in data and isinstance(data['wireguard'], dict):
            _addr = data['wireguard'].get('address')
            if _addr:
                if '/' not in str(_addr):
                    return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400
                try:
                    ipaddress.ip_interface(_addr)
                except ValueError as _e:
                    return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400

        # --- snapshot pre-change state so a later cancel can revert ---------
        old_identity = dict(config_manager.configs.get('_identity', {}))
        old_svc_configs = {
            svc: dict(config_manager.configs.get(svc, {}))
            for svc in data if svc in config_manager.service_schemas
        }
        _pre_change_snapshot = {k: copy.deepcopy(v) for k, v in config_manager.configs.items()
                                if not k.startswith('_')}
        _pre_change_snapshot['_identity'] = copy.deepcopy(config_manager.configs.get('_identity', {}))

        # Persist identity updates before applying side effects below.
        if identity_updates:
            stored = config_manager.configs.get('_identity', {})
            stored.update(identity_updates)
            config_manager.configs['_identity'] = stored
            config_manager._save_all_configs()

        _svc_managers = {
            'network': network_manager,
            'wireguard': wireguard_manager,
            'email': email_manager,
            'calendar': calendar_manager,
            'files': file_manager,
            'routing': routing_manager,
            'vault': current_app.vault_manager,
        }

        all_restarted = []
        all_warnings = []

        # --- apply each service section via its manager ---------------------
        for service, config in data.items():
            if service in config_manager.service_schemas:
                config_manager.update_service_config(service, config)
                mgr = _svc_managers.get(service)
                if mgr:
                    mgr.update_config(config)
                    result = mgr.apply_config(config)
                    all_restarted.extend(result.get('restarted', []))
                    all_warnings.extend(result.get('warnings', []))
                service_bus.publish_event(EventType.CONFIG_CHANGED, service, {
                    'service': service, 'config': config
                })
                if service == 'wireguard' and ('port' in config or 'address' in config):
                    # Endpoint changed: every peer's installed VPN config is stale.
                    for p in peer_registry.list_peers():
                        peer_registry.update_peer(p['peer'], {'config_needs_reinstall': True})
                    n = len(peer_registry.list_peers())
                    if n:
                        all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
                    if 'port' in config:
                        # Keep the identity record's wireguard_port in sync.
                        _id = config_manager.configs.get('_identity', {})
                        _id['wireguard_port'] = config['port']
                        config_manager.configs['_identity'] = _id
                        config_manager._save_all_configs()

        # --- identity change side effects: domain ---------------------------
        if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
            domain = identity_updates['domain']
            net_result = network_manager.apply_domain(domain, reload=False)
            all_warnings.extend(net_result.get('warnings', []))
            _cur_id = config_manager.configs.get('_identity', {})
            ip_utils.write_caddyfile(
                _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
                _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
                domain, '/app/config-caddy/Caddyfile'
            )
            _set_pending_restart(
                [f'domain changed to {domain}'],
                ['dns', 'caddy'],
                pre_change_snapshot=_pre_change_snapshot,
            )

        # --- identity change side effects: cell_name ------------------------
        if identity_updates.get('cell_name'):
            old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            new_name = identity_updates['cell_name']
            if old_name != new_name:
                cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
                all_warnings.extend(cn_result.get('warnings', []))
                _cur_id2 = config_manager.configs.get('_identity', {})
                ip_utils.write_caddyfile(
                    _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
                    new_name,
                    identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')),
                    '/app/config-caddy/Caddyfile'
                )
                _set_pending_restart(
                    [f'cell_name changed to {new_name}'],
                    ['dns'],
                    pre_change_snapshot=_pre_change_snapshot,
                )

        # --- identity change side effects: ip_range -------------------------
        # Changing the subnet forces a docker-network recreation, so every
        # container ('*') is flagged for restart.
        if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
            new_range = identity_updates['ip_range']
            cur_identity = config_manager.configs.get('_identity', {})
            cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
            cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
            ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
            all_restarted.extend(ip_result.get('restarted', []))
            all_warnings.extend(ip_result.get('warnings', []))
            firewall_manager.update_service_ips(new_range)
            firewall_manager.ensure_caddy_virtual_ips()
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
            ip_utils.write_caddyfile(new_range, cur_cell_name, cur_domain, '/app/config-caddy/Caddyfile')
            _set_pending_restart(
                [f'ip_range changed to {new_range} — network will be recreated'],
                ['*'], network_recreate=True,
                pre_change_snapshot=_pre_change_snapshot,
            )

        # --- port changes: regenerate .env and flag affected containers -----
        # (service, field) -> (.env variable name, containers to restart)
        _PORT_CHANGE_MAP = {
            ('network', 'dns_port'): ('dns_port', ['dns']),
            ('wireguard','port'): ('wg_port', ['wireguard']),
            ('email', 'smtp_port'): ('mail_smtp_port', ['mail']),
            ('email', 'submission_port'): ('mail_submission_port', ['mail']),
            ('email', 'imap_port'): ('mail_imap_port', ['mail']),
            ('email', 'webmail_port'): ('rainloop_port', ['rainloop']),
            ('calendar', 'port'): ('radicale_port', ['radicale']),
            ('files', 'port'): ('webdav_port', ['webdav']),
            ('files', 'manager_port'): ('filegator_port', ['filegator']),
        }

        port_changed_containers = set()
        port_change_messages = []

        for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
            if svc_key in data and field in data[svc_key]:
                # Compare against the stored value, falling back to the default
                # so "explicitly setting the default" is not counted as a change.
                default_val = ip_utils.PORT_DEFAULTS.get(_env_key)
                old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
                new_val = data[svc_key][field]
                if old_val != new_val:
                    port_changed_containers.update(containers)
                    port_change_messages.append(f'{svc_key} {field}: {old_val} → {new_val}')

        # wireguard_port may also arrive as an identity key; mirror it into the
        # wireguard service config and apply it immediately.
        if 'wireguard_port' in identity_updates:
            old_wg = old_identity.get('wireguard_port', ip_utils.PORT_DEFAULTS.get('wg_port', 51820))
            new_wg = identity_updates['wireguard_port']
            if old_wg != new_wg:
                _wg_svc = config_manager.configs.get('wireguard', {})
                _wg_svc['port'] = new_wg
                config_manager.update_service_config('wireguard', _wg_svc)
                wireguard_manager.apply_config({'port': new_wg})
                port_changed_containers.add('wireguard')
                port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}')

        if port_changed_containers:
            env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
            _ip_range = config_manager.configs.get('_identity', {}).get(
                'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
            )
            ip_utils.write_env_file(_ip_range, env_file, _collect_service_ports(config_manager.configs))
            _set_pending_restart(port_change_messages, list(port_changed_containers),
                                 pre_change_snapshot=_pre_change_snapshot)

        logger.info(f"Updated config, restarted: {all_restarted}")
        return jsonify({
            "message": "Configuration updated and applied",
            "restarted": all_restarted,
            "warnings": all_warnings,
        })
    except Exception as e:
        logger.error(f"Error updating config: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/pending', methods=['GET'])
def get_pending_config():
    """Report whether a restart is pending and which containers it affects."""
    from app import config_manager
    state = config_manager.configs.get('_pending_restart', {})
    # Expose only the public fields; the internal '_snapshot' stays server-side.
    summary = {
        'needs_restart': state.get('needs_restart', False),
        'applying': state.get('applying', False),
        'changed_at': state.get('changed_at'),
        'changes': state.get('changes', []),
        'containers': state.get('containers', ['*']),
    }
    return jsonify(summary)
|
||||
|
||||
|
||||
@bp.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config():
    """Discard pending changes, reverting configs from the stored snapshot.

    If a pre-change snapshot exists, every snapshotted section is restored,
    the domain / cell_name side effects applied by update_config are undone,
    and the Caddyfile is regenerated from the restored identity. The
    pending-restart flag is cleared in all cases.
    """
    from app import config_manager, network_manager
    import ip_utils as _ip_revert
    pending = config_manager.configs.get('_pending_restart', {})
    snapshot = pending.get('_snapshot', {})
    if snapshot:
        # Capture current identity *before* reverting so we know what to undo.
        cur_identity = dict(config_manager.configs.get('_identity', {}))
        old_identity = snapshot.get('_identity', {})

        # Restore every snapshotted section (service configs + _identity).
        for k, v in snapshot.items():
            config_manager.configs[k] = v

        _id = config_manager.configs.get('_identity', {})
        _dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))

        # Undo a domain change by re-applying the old domain.
        cur_domain = cur_identity.get('domain', '')
        old_domain = old_identity.get('domain', '')
        if cur_domain and old_domain and cur_domain != old_domain:
            network_manager.apply_domain(old_domain, reload=False)

        # Undo a cell_name change: args are (from_name, to_name).
        cur_cell_name = cur_identity.get('cell_name', '')
        old_cell_name = old_identity.get('cell_name', '')
        if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
            network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)

        # Regenerate the Caddyfile from the restored identity values.
        _ip_revert.write_caddyfile(
            _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
            _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')),
            _dom, '/app/config-caddy/Caddyfile'
        )

    _clear_pending_restart()
    return jsonify({'message': 'Pending changes discarded'})
|
||||
|
||||
|
||||
@bp.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
    """Apply pending changes by restarting the affected containers.

    Two strategies:
      * Full restart ('*' in containers): a detached helper container is
        spawned with the docker socket mounted, because this API container
        cannot bring itself down and back up from inside.
      * Partial restart: a background thread runs `docker compose up` with
        --no-deps --force-recreate for just the affected services.
    """
    try:
        from app import config_manager
        pending = config_manager.configs.get('_pending_restart', {})
        if not pending.get('needs_restart'):
            return jsonify({'message': 'No pending changes to apply'})

        # Fallback host paths; refined below by inspecting our own container.
        project_dir = '/home/roof/pic'
        api_image = 'pic_api:latest'
        data_host_path = '/home/roof/pic/data/api'
        try:
            import docker as _docker_sdk
            _client = _docker_sdk.from_env()
            _self = _client.containers.get('cell-api')
            project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
            tags = _self.image.tags
            if tags:
                api_image = tags[0]
            # Find the host path backing /app/data so the helper container
            # can rewrite the same config file.
            for _m in _self.attrs.get('Mounts', []):
                if _m.get('Destination') == '/app/data':
                    data_host_path = _m.get('Source', data_host_path)
                    break
        except Exception:
            # Best-effort discovery; keep the fallbacks on any failure.
            pass

        containers = pending.get('containers', ['*'])
        needs_network_recreate = pending.get('network_recreate', False)

        host_env = os.path.join(project_dir, '.env')
        host_compose = os.path.join(project_dir, 'docker-compose.yml')

        if '*' in containers:
            # Mark as in-progress so the UI can poll while services go down.
            config_manager.configs['_pending_restart']['applying'] = True
            config_manager._save_all_configs()

            # The helper clears the pending flag by rewriting the config JSON
            # directly (this process will be gone); base64 sidesteps shell quoting.
            import base64 as _b64
            _clear_py = (
                "import json,os; p='/app/data/cell_config.json';"
                "f=open(p); d=json.load(f); f.close();"
                "d['_pending_restart']={'needs_restart':False,'changes':[],'containers':[],'network_recreate':False};"
                "tmp=p+'.tmp'; open(tmp,'w').write(json.dumps(d,indent=2)); os.replace(tmp,p)"
            )
            _b64_cmd = _b64.b64encode(_clear_py.encode()).decode()
            clear_flag_cmd = f"python3 -c \"import base64; exec(base64.b64decode('{_b64_cmd}').decode())\""

            if needs_network_recreate:
                # Full down/up so docker recreates the network with the new subnet.
                helper_script = (
                    f'sleep 2'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} down'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )
            else:
                helper_script = (
                    f'sleep 2'
                    f' && {clear_flag_cmd}'
                    f' && docker compose --project-directory {project_dir}'
                    f' -f {host_compose} --env-file {host_env} up -d'
                )

            def _do_apply():
                # Detached helper container survives this API container's restart.
                import subprocess as _subprocess
                _subprocess.Popen(
                    ['docker', 'run', '--rm',
                     '-v', '/var/run/docker.sock:/var/run/docker.sock',
                     '-v', f'{project_dir}:{project_dir}',
                     '-v', f'{data_host_path}:/app/data',
                     '--entrypoint', 'sh',
                     api_image,
                     '-c', helper_script],
                    close_fds=True,
                    stdout=_subprocess.DEVNULL,
                    stderr=_subprocess.DEVNULL,
                )
                logger.info(
                    'spawned helper container for all-services restart'
                    + (' (network_recreate)' if needs_network_recreate else '')
                )
        else:
            def _do_apply():
                # Partial restart: recreate only the affected containers.
                import time as _time
                import subprocess as _subprocess
                # Small delay lets the HTTP response flush before recreation.
                _time.sleep(0.3)
                result = _subprocess.run(
                    ['docker', 'compose',
                     '--project-directory', project_dir,
                     '-f', '/app/docker-compose.yml',
                     '--env-file', '/app/.env.compose',
                     'up', '-d', '--no-deps', '--force-recreate'] + containers,
                    capture_output=True, text=True, timeout=120,
                )
                if result.returncode != 0:
                    logger.error(f"docker compose up failed: {result.stderr.strip()}")
                else:
                    logger.info(f'docker compose up completed for: {containers}')
                    _clear_pending_restart()

        # daemon=False so a partial restart finishes even during interpreter teardown.
        threading.Thread(target=_do_apply, daemon=False).start()
        return jsonify({
            'message': 'Applying configuration — containers are restarting',
            'restart_in_progress': True,
        })
    except Exception as e:
        logger.error(f"Error applying config: {e}")
        return jsonify({'error': str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/backup', methods=['POST'])
def create_config_backup():
    """Create a new config backup and announce it on the service bus."""
    try:
        from app import config_manager, service_bus, EventType
        backup_id = config_manager.backup_config()
        event_payload = {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat()
        }
        service_bus.publish_event(EventType.BACKUP_CREATED, 'api', event_payload)
        return jsonify({"backup_id": backup_id})
    except Exception as e:
        logger.error(f"Error creating backup: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/backups', methods=['GET'])
def list_config_backups():
    """Return metadata for all stored configuration backups."""
    try:
        from app import config_manager
        backups = config_manager.list_backups()
        return jsonify(backups)
    except Exception as e:
        logger.error(f"Error listing backups: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/restore/<backup_id>', methods=['POST'])
def restore_config(backup_id):
    """Restore configuration (optionally a subset of services) from a backup."""
    try:
        from app import config_manager, service_bus, EventType
        body = request.get_json(silent=True) or {}
        # Guard clause: bail out first on failure, then publish + respond.
        if not config_manager.restore_config(backup_id, services=body.get('services')):
            return jsonify({"error": f"Failed to restore backup: {backup_id}"}), 500
        service_bus.publish_event(EventType.RESTORE_COMPLETED, 'api', {
            'backup_id': backup_id,
            'timestamp': datetime.utcnow().isoformat()
        })
        return jsonify({"message": f"Configuration restored from backup: {backup_id}"})
    except Exception as e:
        logger.error(f"Error restoring backup: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/export', methods=['GET'])
def export_config():
    """Export the full configuration in the requested format (default 'json').

    Query params:
        format: export format passed through to config_manager.export_config.
    """
    try:
        from app import config_manager
        # Renamed local from `format` to avoid shadowing the builtin.
        export_format = request.args.get('format', 'json')
        config_data = config_manager.export_config(export_format)
        return jsonify({"config": config_data, "format": export_format})
    except Exception as e:
        logger.error(f"Error exporting config: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/import', methods=['POST'])
def import_config():
    """Import a configuration payload previously produced by the export route."""
    try:
        from app import config_manager
        payload = request.get_json(silent=True)
        if payload is None:
            return jsonify({"error": "No data provided"}), 400
        imported = config_manager.import_config(payload.get('config'), payload.get('format', 'json'))
        if not imported:
            return jsonify({"error": "Failed to import configuration"}), 500
        return jsonify({"message": "Configuration imported successfully"})
    except Exception as e:
        logger.error(f"Error importing config: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/backups/<backup_id>/download', methods=['GET'])
def download_backup(backup_id):
    """Stream a backup directory to the client as a zip archive."""
    try:
        from app import config_manager
        backup_path = config_manager.backup_dir / backup_id
        if not backup_path.exists():
            return jsonify({'error': f'Backup {backup_id} not found'}), 404
        archive = io.BytesIO()
        with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zf:
            for entry in backup_path.rglob('*'):
                if entry.is_file():
                    # Store entries relative to the backup root inside the zip.
                    zf.write(entry, entry.relative_to(backup_path))
        archive.seek(0)
        return send_file(archive, mimetype='application/zip',
                         as_attachment=True,
                         download_name=f'{backup_id}.zip')
    except Exception as e:
        logger.error(f"Error downloading backup: {e}")
        return jsonify({'error': str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/backup/upload', methods=['POST'])
def upload_backup():
    """Accept an uploaded backup zip and unpack it into the backup directory.

    The backup id is derived from the uploaded filename, sanitized to
    alphanumerics and underscores; a timestamp-based id is used when no
    usable name is available. The upload is rejected unless the archive is a
    valid zip containing a manifest.json at its root.
    """
    try:
        from app import config_manager
        if 'file' not in request.files:
            return jsonify({'error': 'No file provided'}), 400
        f = request.files['file']
        filename = f.filename or ''
        if filename.endswith('.zip'):
            backup_id = filename[:-4]
        else:
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        # Sanitize to prevent path traversal via the uploaded filename.
        backup_id = ''.join(c for c in backup_id if c.isalnum() or c == '_')
        if not backup_id:
            # Bug fix: a filename made entirely of stripped characters (e.g.
            # "....zip") previously yielded an empty id, making backup_path
            # the backup root itself — and a failed extract would then
            # rmtree the entire backup directory.
            backup_id = f"backup_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
        backup_path = config_manager.backup_dir / backup_id
        backup_path.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(io.BytesIO(f.read())) as zf:
                zf.extractall(backup_path)
        except zipfile.BadZipFile:
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid zip file'}), 400
        if not (backup_path / 'manifest.json').exists():
            shutil.rmtree(backup_path, ignore_errors=True)
            return jsonify({'error': 'Invalid backup: missing manifest.json'}), 400
        return jsonify({'backup_id': backup_id})
    except Exception as e:
        logger.error(f"Error uploading backup: {e}")
        return jsonify({'error': str(e)}), 500
|
||||
|
||||
|
||||
@bp.route('/api/config/backups/<backup_id>', methods=['DELETE'])
def delete_config_backup(backup_id):
    """Delete a stored configuration backup by id."""
    try:
        from app import config_manager
        if config_manager.delete_backup(backup_id):
            return jsonify({"message": f"Backup {backup_id} deleted"})
        return jsonify({"error": f"Failed to delete backup {backup_id}"}), 500
    except Exception as e:
        logger.error(f"Error deleting backup: {e}")
        return jsonify({"error": str(e)}), 500
|
||||
Reference in New Issue
Block a user