fix: silent autosave, pending dedup, domain/cell_name pending, containers access

- Settings: remove Save buttons; autosave is silent (no toast on success, error only)
- Settings: loadAll() resets dirty flags to prevent stale autosave after discard
- app.py: fix domain/ip_range "actually changed" check — full identity is always
  sent on save so these were triggering pending on every keystroke regardless
- app.py: _dedup_changes handles port-change format "service field: old → new"
  (split on ':' not ' changed') so dns_port changed twice shows one entry
- app.py: domain + cell_name changes now go through pending restart banner;
  apply_domain/apply_cell_name write files immediately (reload=False) and set
  pending; Discard restores zone files + Caddyfile to pre-change state
- app.py: _set_pending_restart captures pre-change snapshot BEFORE config writes
  (was snapshotting after, making Discard a no-op)
- app.py: is_local_request reads /proc/net/route to allow the actual Docker
  bridge subnet (172.0.0.0/24) which is not RFC-1918; fixes Containers page 403
- container_manager: get_container_logs raises instead of swallowing exceptions
  so nonexistent container returns 500+error not 200+empty

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-24 07:16:13 -04:00
parent 4215e03ac6
commit 2bd6545f0e
5 changed files with 184 additions and 103 deletions
+119 -21
View File
@@ -319,6 +319,26 @@ def health_monitor_loop():
health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True) health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True)
health_monitor_thread.start() health_monitor_thread.start()
def _local_subnets():
"""Return all subnets the container is directly connected to (from routing table)."""
import ipaddress as _ipa, socket as _sock, struct as _struct
nets = []
try:
with open('/proc/net/route') as _f:
for _line in _f.readlines()[1:]:
_parts = _line.strip().split()
if len(_parts) < 8 or _parts[0] == 'lo':
continue
_dest = _sock.inet_ntoa(_struct.pack('<I', int(_parts[1], 16)))
_mask = _sock.inet_ntoa(_struct.pack('<I', int(_parts[7], 16)))
if _dest == '0.0.0.0':
continue
nets.append(_ipa.ip_network(f'{_dest}/{_mask}', strict=False))
except Exception:
pass
return nets
def is_local_request(): def is_local_request():
remote_addr = request.remote_addr remote_addr = request.remote_addr
forwarded_for = request.headers.get('X-Forwarded-For', '') forwarded_for = request.headers.get('X-Forwarded-For', '')
@@ -331,13 +351,21 @@ def is_local_request():
try: try:
import ipaddress as _ipa import ipaddress as _ipa
ip = _ipa.ip_address(addr) ip = _ipa.ip_address(addr)
if ip.is_private or ip.is_loopback: if ip.is_loopback:
return True return True
# Also allow IPs in the configured cell-network, which may fall outside # RFC-1918 private ranges
# RFC-1918 (e.g. 172.0.0.0/24 is not in 172.16.0.0/12). for _rfc in ('10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'):
cell_net = config_manager.configs.get('_identity', {}).get( if ip in _ipa.ip_network(_rfc):
return True
# Any subnet the container is directly attached to (handles non-RFC-1918
# Docker bridge networks such as 172.0.0.0/24).
for _net in _local_subnets():
if ip in _net:
return True
# Configured cell ip_range (WireGuard peer subnet)
_cell = config_manager.configs.get('_identity', {}).get(
'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) 'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
if ip in _ipa.ip_network(cell_net, strict=False): if ip in _ipa.ip_network(_cell, strict=False):
return True return True
except Exception: except Exception:
pass pass
@@ -345,8 +373,7 @@ def is_local_request():
if _allowed(remote_addr): if _allowed(remote_addr):
return True return True
# Only trust the LAST X-Forwarded-For entry — that is what Caddy appended. # Only trust the LAST X-Forwarded-For entry — that is what the reverse proxy appended.
# Iterating all entries allows clients to spoof local origin by prepending 127.0.0.1.
if forwarded_for: if forwarded_for:
last_hop = forwarded_for.split(',')[-1].strip() last_hop = forwarded_for.split(',')[-1].strip()
if _allowed(last_hop): if _allowed(last_hop):
@@ -517,12 +544,18 @@ def update_config():
except ValueError as _e: except ValueError as _e:
return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400 return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400
# Capture old identity and service configs BEFORE saving, for change detection # Capture old identity and service configs BEFORE saving, for change detection + revert
import copy as _copy
old_identity = dict(config_manager.configs.get('_identity', {})) old_identity = dict(config_manager.configs.get('_identity', {}))
old_svc_configs = { old_svc_configs = {
svc: dict(config_manager.configs.get(svc, {})) svc: dict(config_manager.configs.get(svc, {}))
for svc in data if svc in config_manager.service_schemas for svc in data if svc in config_manager.service_schemas
} }
# Full pre-change snapshot — used by Discard to revert to original state.
# Must be captured here, before any config writes, so it holds the true old values.
_pre_change_snapshot = {k: _copy.deepcopy(v) for k, v in config_manager.configs.items()
if not k.startswith('_')}
_pre_change_snapshot['_identity'] = _copy.deepcopy(config_manager.configs.get('_identity', {}))
if identity_updates: if identity_updates:
stored = config_manager.configs.get('_identity', {}) stored = config_manager.configs.get('_identity', {})
stored.update(identity_updates) stored.update(identity_updates)
@@ -571,11 +604,10 @@ def update_config():
config_manager.configs['_identity'] = _id config_manager.configs['_identity'] = _id
config_manager._save_all_configs() config_manager._save_all_configs()
# Apply cell identity domain to network and email services # Apply cell identity domain to network and email services (write files, defer reload)
if identity_updates.get('domain'): if identity_updates.get('domain') and identity_updates['domain'] != old_identity.get('domain', ''):
domain = identity_updates['domain'] domain = identity_updates['domain']
net_result = network_manager.apply_domain(domain) net_result = network_manager.apply_domain(domain, reload=False)
all_restarted.extend(net_result.get('restarted', []))
all_warnings.extend(net_result.get('warnings', [])) all_warnings.extend(net_result.get('warnings', []))
# Regenerate Caddyfile — virtual host names change with the domain # Regenerate Caddyfile — virtual host names change with the domain
import ip_utils as _ip_domain import ip_utils as _ip_domain
@@ -583,14 +615,18 @@ def update_config():
_cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) _cur_range = _cur_id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) _cur_name = _cur_id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
_ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config/caddy/Caddyfile') _ip_domain.write_caddyfile(_cur_range, _cur_name, domain, '/app/config/caddy/Caddyfile')
_set_pending_restart(
[f'domain changed to {domain}'],
['dns', 'caddy'],
pre_change_snapshot=_pre_change_snapshot,
)
# Apply cell name change to DNS hostname record # Apply cell name change to DNS hostname record (write files, defer reload)
if identity_updates.get('cell_name'): if identity_updates.get('cell_name'):
old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) old_name = old_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
new_name = identity_updates['cell_name'] new_name = identity_updates['cell_name']
if old_name != new_name: if old_name != new_name:
cn_result = network_manager.apply_cell_name(old_name, new_name) cn_result = network_manager.apply_cell_name(old_name, new_name, reload=False)
all_restarted.extend(cn_result.get('restarted', []))
all_warnings.extend(cn_result.get('warnings', [])) all_warnings.extend(cn_result.get('warnings', []))
# Regenerate Caddyfile — main virtual host name changes with cell_name # Regenerate Caddyfile — main virtual host name changes with cell_name
import ip_utils as _ip_name import ip_utils as _ip_name
@@ -598,9 +634,14 @@ def update_config():
_cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) _cur_range2 = _cur_id2.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) _cur_domain2 = identity_updates.get('domain') or _cur_id2.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
_ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config/caddy/Caddyfile') _ip_name.write_caddyfile(_cur_range2, new_name, _cur_domain2, '/app/config/caddy/Caddyfile')
_set_pending_restart(
[f'cell_name changed to {new_name}'],
['dns'],
pre_change_snapshot=_pre_change_snapshot,
)
# Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules # Apply ip_range change: regenerate DNS records, update virtual IPs + firewall rules
if identity_updates.get('ip_range'): if identity_updates.get('ip_range') and identity_updates['ip_range'] != old_identity.get('ip_range', ''):
import ip_utils import ip_utils
new_range = identity_updates['ip_range'] new_range = identity_updates['ip_range']
cur_identity = config_manager.configs.get('_identity', {}) cur_identity = config_manager.configs.get('_identity', {})
@@ -623,7 +664,8 @@ def update_config():
# docker compose down is required before up (Docker can't change subnet in-place) # docker compose down is required before up (Docker can't change subnet in-place)
_set_pending_restart( _set_pending_restart(
[f'ip_range changed to {new_range} — network will be recreated'], [f'ip_range changed to {new_range} — network will be recreated'],
['*'], network_recreate=True ['*'], network_recreate=True,
pre_change_snapshot=_pre_change_snapshot,
) )
# Detect port changes across service configs and identity # Detect port changes across service configs and identity
@@ -677,7 +719,8 @@ def update_config():
_ip_utils_ports.write_env_file( _ip_utils_ports.write_env_file(
_ip_range, _env_file, _collect_service_ports(config_manager.configs) _ip_range, _env_file, _collect_service_ports(config_manager.configs)
) )
_set_pending_restart(port_change_messages, list(port_changed_containers)) _set_pending_restart(port_change_messages, list(port_changed_containers),
pre_change_snapshot=_pre_change_snapshot)
logger.info(f"Updated config, restarted: {all_restarted}") logger.info(f"Updated config, restarted: {all_restarted}")
return jsonify({ return jsonify({
@@ -717,11 +760,28 @@ def _collect_service_ports(configs: dict) -> dict:
return ports return ports
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False): def _dedup_changes(existing: list, new: list) -> list:
"""Merge change lists, keeping only the latest entry per config key."""
def key_of(msg: str) -> str:
# "ip_range changed to X" → "ip_range"
if ' changed' in msg:
return msg.split(' changed')[0].strip()
# "network dns_port: 52 → 53" → "network dns_port"
if ':' in msg:
return msg.split(':')[0].strip()
return msg
merged = {key_of(c): c for c in existing}
merged.update({key_of(c): c for c in new})
return list(merged.values())
def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False,
pre_change_snapshot: dict = None):
"""Record that specific containers need to be restarted to apply configuration. """Record that specific containers need to be restarted to apply configuration.
containers: list of docker-compose service names, or None/'*' to restart all. containers: list of docker-compose service names, or None/'*' to restart all.
network_recreate: True when the Docker bridge subnet changed (requires down+up). network_recreate: True when the Docker bridge subnet changed (requires down+up).
pre_change_snapshot: full config captured BEFORE this save (for Discard to revert).
Merges with any existing pending state so multiple changes accumulate. Merges with any existing pending state so multiple changes accumulate.
""" """
from datetime import datetime as _dt from datetime import datetime as _dt
@@ -729,6 +789,13 @@ def _set_pending_restart(changes: list, containers: list = None, network_recreat
existing_changes = existing.get('changes', []) if existing.get('needs_restart') else [] existing_changes = existing.get('changes', []) if existing.get('needs_restart') else []
existing_containers = existing.get('containers', []) if existing.get('needs_restart') else [] existing_containers = existing.get('containers', []) if existing.get('needs_restart') else []
# Keep the oldest snapshot (the true pre-change state). Never overwrite it with a
# later snapshot — subsequent changes while pending should still revert to origin.
if not existing.get('needs_restart'):
snapshot = pre_change_snapshot or {}
else:
snapshot = existing.get('_snapshot', {})
if containers is None or '*' in (containers or []) or existing_containers == ['*']: if containers is None or '*' in (containers or []) or existing_containers == ['*']:
new_containers = ['*'] new_containers = ['*']
else: else:
@@ -737,9 +804,10 @@ def _set_pending_restart(changes: list, containers: list = None, network_recreat
config_manager.configs['_pending_restart'] = { config_manager.configs['_pending_restart'] = {
'needs_restart': True, 'needs_restart': True,
'changed_at': _dt.utcnow().isoformat(), 'changed_at': _dt.utcnow().isoformat(),
'changes': existing_changes + changes, 'changes': _dedup_changes(existing_changes, changes),
'containers': new_containers, 'containers': new_containers,
'network_recreate': network_recreate or existing.get('network_recreate', False), 'network_recreate': network_recreate or existing.get('network_recreate', False),
'_snapshot': snapshot,
} }
config_manager._save_all_configs() config_manager._save_all_configs()
@@ -765,7 +833,37 @@ def get_pending_config():
@app.route('/api/config/pending', methods=['DELETE']) @app.route('/api/config/pending', methods=['DELETE'])
def cancel_pending_config(): def cancel_pending_config():
"""Discard pending configuration changes without restarting any containers.""" """Discard pending configuration changes and restore config to pre-change snapshot."""
pending = config_manager.configs.get('_pending_restart', {})
snapshot = pending.get('_snapshot', {})
if snapshot:
# Capture current (changed) identity before reverting, to rewrite config files
cur_identity = dict(config_manager.configs.get('_identity', {}))
old_identity = snapshot.get('_identity', {})
# Restore config values from snapshot
for k, v in snapshot.items():
config_manager.configs[k] = v
# Rewrite DNS/Caddy config files back to old values so they match restored config
import ip_utils as _ip_revert
_id = config_manager.configs.get('_identity', {})
_range = _id.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
_cell = _id.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
_dom = _id.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
cur_domain = cur_identity.get('domain', '')
old_domain = old_identity.get('domain', '')
if cur_domain and old_domain and cur_domain != old_domain:
network_manager.apply_domain(old_domain, reload=False)
cur_cell_name = cur_identity.get('cell_name', '')
old_cell_name = old_identity.get('cell_name', '')
if cur_cell_name and old_cell_name and cur_cell_name != old_cell_name:
network_manager.apply_cell_name(cur_cell_name, old_cell_name, reload=False)
_ip_revert.write_caddyfile(_range, _cell, _dom, '/app/config/caddy/Caddyfile')
_clear_pending_restart() _clear_pending_restart()
return jsonify({'message': 'Pending changes discarded'}) return jsonify({'message': 'Pending changes discarded'})
+4 -9
View File
@@ -283,15 +283,10 @@ class ContainerManager(BaseServiceManager):
def get_container_logs(self, name: str, tail: int = 100) -> str: def get_container_logs(self, name: str, tail: int = 100) -> str:
"""Get container logs""" """Get container logs"""
try: if not self.client:
if not self.client: raise RuntimeError("Docker client not available")
return "Docker client not available" container = self.client.containers.get(name)
return container.logs(tail=tail).decode('utf-8')
container = self.client.containers.get(name)
return container.logs(tail=tail).decode('utf-8')
except Exception as e:
logger.error(f"Error getting logs for container {name}: {e}")
return str(e)
def get_container_stats(self, name: str) -> dict: def get_container_stats(self, name: str) -> dict:
"""Get container statistics""" """Get container statistics"""
+24 -19
View File
@@ -5,6 +5,7 @@ Handles DNS, DHCP, and NTP functionality
""" """
import os import os
import re
import json import json
import subprocess import subprocess
import logging import logging
@@ -383,8 +384,11 @@ class NetworkManager(BaseServiceManager):
return {'restarted': restarted, 'warnings': warnings} return {'restarted': restarted, 'warnings': warnings}
def apply_domain(self, domain: str) -> Dict[str, Any]: def apply_domain(self, domain: str, reload: bool = True) -> Dict[str, Any]:
"""Update domain across dnsmasq, Corefile, and zone file; reload DNS + DHCP.""" """Update domain across dnsmasq, Corefile, and zone file; reload DNS + DHCP.
reload=False writes config files only — use when deferring container restart.
"""
restarted = [] restarted = []
warnings = [] warnings = []
@@ -400,8 +404,9 @@ class NetworkManager(BaseServiceManager):
] ]
with open(dhcp_conf, 'w') as f: with open(dhcp_conf, 'w') as f:
f.writelines(lines) f.writelines(lines)
self._reload_dhcp_service() if reload:
restarted.append('cell-dhcp (reloaded)') self._reload_dhcp_service()
restarted.append('cell-dhcp (reloaded)')
except Exception as e: except Exception as e:
warnings.append(f"dnsmasq domain update failed: {e}") warnings.append(f"dnsmasq domain update failed: {e}")
@@ -424,8 +429,6 @@ class NetworkManager(BaseServiceManager):
dns_data = os.path.join(self.data_dir, 'dns') dns_data = os.path.join(self.data_dir, 'dns')
if os.path.isdir(dns_data): if os.path.isdir(dns_data):
dst = os.path.join(dns_data, f'{domain}.zone') dst = os.path.join(dns_data, f'{domain}.zone')
# Find the best source: prefer a non-target zone (old domain) so we
# can migrate its content; fall back to the target zone itself.
zone_files = [ zone_files = [
os.path.join(dns_data, f) os.path.join(dns_data, f)
for f in os.listdir(dns_data) for f in os.listdir(dns_data)
@@ -443,7 +446,6 @@ class NetworkManager(BaseServiceManager):
r'^\$ORIGIN\s+\S+', f'$ORIGIN {domain}.', zone_content, flags=re.MULTILINE) r'^\$ORIGIN\s+\S+', f'$ORIGIN {domain}.', zone_content, flags=re.MULTILINE)
with open(dst, 'w') as f: with open(dst, 'w') as f:
f.write(zone_content) f.write(zone_content)
# Remove every zone file that is not the current domain's file
for zone_path in zone_files: for zone_path in zone_files:
if zone_path != dst: if zone_path != dst:
try: try:
@@ -453,17 +455,21 @@ class NetworkManager(BaseServiceManager):
except Exception as e: except Exception as e:
warnings.append(f"zone file domain update failed: {e}") warnings.append(f"zone file domain update failed: {e}")
# 4. Reload CoreDNS # 4. Reload CoreDNS (only when not deferring to Apply)
try: if reload:
self._reload_dns_service() try:
restarted.append('cell-dns (reloaded)') self._reload_dns_service()
except Exception as e: restarted.append('cell-dns (reloaded)')
warnings.append(f"CoreDNS reload failed: {e}") except Exception as e:
warnings.append(f"CoreDNS reload failed: {e}")
return {'restarted': restarted, 'warnings': warnings} return {'restarted': restarted, 'warnings': warnings}
def apply_cell_name(self, old_name: str, new_name: str) -> Dict[str, Any]: def apply_cell_name(self, old_name: str, new_name: str, reload: bool = True) -> Dict[str, Any]:
"""Update the cell hostname record in the primary DNS zone file.""" """Update the cell hostname record in the primary DNS zone file.
reload=False writes the zone file only — use when deferring container restart.
"""
restarted = [] restarted = []
warnings = [] warnings = []
if not old_name or not new_name or old_name == new_name: if not old_name or not new_name or old_name == new_name:
@@ -476,8 +482,6 @@ class NetworkManager(BaseServiceManager):
zone_file = os.path.join(dns_data, fname) zone_file = os.path.join(dns_data, fname)
with open(zone_file) as f: with open(zone_file) as f:
content = f.read() content = f.read()
# Replace hostname record: old_name IN A ...
import re
content = re.sub( content = re.sub(
rf'^{re.escape(old_name)}(\s+IN\s+A\s+)', rf'^{re.escape(old_name)}(\s+IN\s+A\s+)',
f'{new_name}\\1', f'{new_name}\\1',
@@ -486,8 +490,9 @@ class NetworkManager(BaseServiceManager):
with open(zone_file, 'w') as f: with open(zone_file, 'w') as f:
f.write(content) f.write(content)
break break
self._reload_dns_service() if reload:
restarted.append('cell-dns (reloaded)') self._reload_dns_service()
restarted.append('cell-dns (reloaded)')
except Exception as e: except Exception as e:
warnings.append(f"cell_name DNS update failed: {e}") warnings.append(f"cell_name DNS update failed: {e}")
return {'restarted': restarted, 'warnings': warnings} return {'restarted': restarted, 'warnings': warnings}
+1
View File
@@ -215,6 +215,7 @@ function AppCore() {
const handleCancel = useCallback(async () => { const handleCancel = useCallback(async () => {
await cellAPI.cancelPending(); await cellAPI.cancelPending();
setPending({ needs_restart: false, changes: [] }); setPending({ needs_restart: false, changes: [] });
window.dispatchEvent(new CustomEvent('pic-config-discarded'));
}, []); }, []);
const navigation = [ const navigation = [
+36 -54
View File
@@ -4,7 +4,7 @@ import { useDraftConfig } from '../contexts/DraftConfigContext';
import { import {
Settings as SettingsIcon, Server, Shield, Network, Mail, Calendar, Settings as SettingsIcon, Server, Shield, Network, Mail, Calendar,
HardDrive, GitBranch, Archive, Upload, Download, Trash2, RotateCcw, HardDrive, GitBranch, Archive, Upload, Download, Trash2, RotateCcw,
Save, ChevronDown, ChevronRight, CheckCircle, XCircle, AlertCircle, ChevronDown, ChevronRight, CheckCircle, XCircle,
RefreshCw, Lock RefreshCw, Lock
} from 'lucide-react'; } from 'lucide-react';
import { cellAPI } from '../services/api'; import { cellAPI } from '../services/api';
@@ -402,12 +402,10 @@ function Settings() {
// identity // identity
const [identity, setIdentity] = useState({ cell_name: '', domain: '', ip_range: '' }); const [identity, setIdentity] = useState({ cell_name: '', domain: '', ip_range: '' });
const [identityDirty, setIdentityDirty] = useState(false); const [identityDirty, setIdentityDirty] = useState(false);
const [identitySaving, setIdentitySaving] = useState(false);
// service configs // service configs
const [serviceConfigs, setServiceConfigs] = useState({}); const [serviceConfigs, setServiceConfigs] = useState({});
const [serviceDirty, setServiceDirty] = useState({}); const [serviceDirty, setServiceDirty] = useState({});
const [serviceSaving, setServiceSaving] = useState({});
const portConflicts = useMemo(() => detectPortConflicts(serviceConfigs), [serviceConfigs]); const portConflicts = useMemo(() => detectPortConflicts(serviceConfigs), [serviceConfigs]);
@@ -431,7 +429,9 @@ function Settings() {
domain: cfg.domain || '', domain: cfg.domain || '',
ip_range: cfg.ip_range || '', ip_range: cfg.ip_range || '',
}); });
setIdentityDirty(false);
setServiceConfigs(cfg.service_configs || {}); setServiceConfigs(cfg.service_configs || {});
setServiceDirty({});
setBackups(bkRes.data || []); setBackups(bkRes.data || []);
} catch (err) { } catch (err) {
toast('Failed to load configuration', 'error'); toast('Failed to load configuration', 'error');
@@ -442,15 +442,11 @@ function Settings() {
useEffect(() => { loadAll(); }, [loadAll]); useEffect(() => { loadAll(); }, [loadAll]);
const _applyResult = (res, label) => { useEffect(() => {
const { restarted = [], warnings = [] } = res.data || {}; const handler = () => loadAll();
if (restarted.length > 0) { window.addEventListener('pic-config-discarded', handler);
toast(`${label} saved — restarted: ${restarted.join(', ')}`); return () => window.removeEventListener('pic-config-discarded', handler);
} else { }, [loadAll]);
toast(`${label} saved`);
}
warnings.forEach((w) => toast(w, 'warning'));
};
// identity save // identity save
const ipRangeError = identity.ip_range && !isRFC1918Cidr(identity.ip_range) const ipRangeError = identity.ip_range && !isRFC1918Cidr(identity.ip_range)
@@ -465,42 +461,34 @@ function Settings() {
? 'Domain must be 255 characters or fewer' ? 'Domain must be 255 characters or fewer'
: (!identity.domain ? 'Domain is required' : null); : (!identity.domain ? 'Domain is required' : null);
const saveIdentity = async () => { const saveIdentity = useCallback(async () => {
if (ipRangeError || cellNameError || domainError) return; if (ipRangeError || cellNameError || domainError) return;
setIdentitySaving(true);
try { try {
const res = await cellAPI.updateConfig(identity); await cellAPI.updateConfig(identity);
setIdentityDirty(false); setIdentityDirty(false);
draftConfig?.setDirty('identity', false); draftConfig?.setDirty('identity', false);
_applyResult(res, 'Cell identity');
refreshConfig(); refreshConfig();
} catch (err) { } catch (err) {
toast(err.response?.data?.error || 'Failed to save identity', 'error'); toast(err.response?.data?.error || 'Failed to save identity', 'error');
} finally {
setIdentitySaving(false);
} }
}; }, [identity, ipRangeError, cellNameError, domainError, draftConfig, refreshConfig]);
// service config save // service config save
const saveService = async (key) => { const saveService = useCallback(async (key) => {
const { defaults } = SERVICE_DEFS.find((d) => d.key === key) || {}; const { defaults } = SERVICE_DEFS.find((d) => d.key === key) || {};
const data = { ...(defaults || {}), ...(serviceConfigs[key] || {}) }; const data = { ...(defaults || {}), ...(serviceConfigs[key] || {}) };
const hasFieldErrors = Object.keys(validateServiceConfig(key, data)).length > 0; const hasFieldErrors = Object.keys(validateServiceConfig(key, data)).length > 0;
const hasConflicts = (PORT_CONFLICT_FIELDS[key] || []).some(f => portConflicts[`${key}|${f}`]); const hasConflicts = (PORT_CONFLICT_FIELDS[key] || []).some(f => portConflicts[`${key}|${f}`]);
if (hasFieldErrors || hasConflicts) return; if (hasFieldErrors || hasConflicts) return;
setServiceSaving((s) => ({ ...s, [key]: true }));
try { try {
const res = await cellAPI.updateConfig({ [key]: serviceConfigs[key] }); await cellAPI.updateConfig({ [key]: serviceConfigs[key] });
setServiceDirty((d) => ({ ...d, [key]: false })); setServiceDirty((d) => ({ ...d, [key]: false }));
draftConfig?.setDirty(key, false); draftConfig?.setDirty(key, false);
_applyResult(res, key);
refreshConfig(); refreshConfig();
} catch (err) { } catch (err) {
toast(err.response?.data?.error || `Failed to save ${key} config`, 'error'); toast(err.response?.data?.error || `Failed to save ${key} config`, 'error');
} finally {
setServiceSaving((s) => ({ ...s, [key]: false }));
} }
}; }, [serviceConfigs, portConflicts, draftConfig, refreshConfig]);
const updateServiceConfig = (key, data) => { const updateServiceConfig = (key, data) => {
setServiceConfigs((prev) => ({ ...prev, [key]: data })); setServiceConfigs((prev) => ({ ...prev, [key]: data }));
@@ -541,6 +529,28 @@ function Settings() {
}, [draftConfig]); }, [draftConfig]);
// //
// Debounced auto-save
// Identity autosave: persist 800ms after the last edit. The save callback is
// read through a ref so this effect's dependency list can omit it.
// NOTE(review): saveIdentityRef is presumably kept in sync with the latest
// saveIdentity callback elsewhere in this component — confirm it is assigned.
useEffect(() => {
// Nothing to do until the user has actually edited an identity field.
if (!identityDirty) return;
// Never autosave invalid input; the user is likely still mid-edit.
if (ipRangeError || cellNameError || domainError) return;
const timer = setTimeout(() => saveIdentityRef.current(), 800);
// Clearing on every re-run restarts the countdown — this is the debounce.
return () => clearTimeout(timer);
}, [identity, identityDirty, ipRangeError, cellNameError, domainError]); // eslint-disable-line react-hooks/exhaustive-deps
// Service-config autosave: one debounce timer per dirty AND valid section.
useEffect(() => {
const timers = SERVICE_DEFS
.filter(({ key }) => serviceDirty[key])
.filter(({ key, defaults }) => {
// Skip sections that have field validation errors or port conflicts.
const data = { ...defaults, ...(serviceConfigs[key] || {}) };
const hasFieldErrors = Object.keys(validateServiceConfig(key, data)).length > 0;
const hasConflicts = (PORT_CONFLICT_FIELDS[key] || []).some(f => portConflicts[`${key}|${f}`]);
return !hasFieldErrors && !hasConflicts;
})
.map(({ key }) => setTimeout(() => saveServiceRef.current(key), 800));
// Cancel all pending saves whenever the inputs change again (debounce).
return () => timers.forEach(clearTimeout);
}, [serviceConfigs, serviceDirty, portConflicts]); // eslint-disable-line react-hooks/exhaustive-deps
//
// backups // backups
const createBackup = async () => { const createBackup = async () => {
setBackupCreating(true); setBackupCreating(true);
@@ -654,20 +664,6 @@ function Settings() {
/> />
</Field> </Field>
</div> </div>
<div className="flex justify-end mt-4">
<button
onClick={saveIdentity}
disabled={!identityDirty || identitySaving || !!ipRangeError || !!cellNameError || !!domainError}
className="btn-primary flex items-center gap-2 text-sm disabled:opacity-50"
>
{identitySaving ? <RefreshCw className="h-4 w-4 animate-spin" /> : <Save className="h-4 w-4" />}
Save Identity
</button>
</div>
<p className="text-xs text-gray-400 mt-2">
IP Range and port changes update the .env file and mark affected containers for restart.
Use the banner above to apply when ready.
</p>
</Section> </Section>
{/* Service Configurations */} {/* Service Configurations */}
@@ -682,23 +678,9 @@ function Settings() {
if (msg) conflictErrors[field] = msg; if (msg) conflictErrors[field] = msg;
} }
const errors = { ...validateServiceConfig(key, data), ...conflictErrors }; const errors = { ...validateServiceConfig(key, data), ...conflictErrors };
const hasErrors = Object.keys(errors).length > 0;
const dirty = serviceDirty[key];
const saving = serviceSaving[key];
return ( return (
<Section key={key} icon={Icon} title={label} collapsible defaultOpen={false}> <Section key={key} icon={Icon} title={label} collapsible defaultOpen={false}>
<Form data={data} onChange={(d) => updateServiceConfig(key, d)} errors={errors} /> <Form data={data} onChange={(d) => updateServiceConfig(key, d)} errors={errors} />
<div className="flex items-center justify-between mt-4">
<span className="text-xs text-gray-400">Port/directory changes take effect after container restart.</span>
<button
onClick={() => saveService(key)}
disabled={!dirty || saving || hasErrors}
className="btn-primary flex items-center gap-2 text-sm disabled:opacity-50"
>
{saving ? <RefreshCw className="h-4 w-4 animate-spin" /> : <Save className="h-4 w-4" />}
Save
</button>
</div>
</Section> </Section>
); );
})} })}