diff --git a/api/app.py b/api/app.py
index b4d502c..9e74f3c 100644
--- a/api/app.py
+++ b/api/app.py
@@ -400,6 +400,16 @@ def get_config():
'ip_range': identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')),
'wireguard_port': identity.get('wireguard_port', int(os.environ.get('WG_PORT', '51820'))),
}
+ # Expose computed per-service IPs so the frontend doesn't need to derive them
+ import ip_utils as _ip_utils_cfg
+ _ips = _ip_utils_cfg.get_service_ips(config['ip_range'])
+ config['service_ips'] = {
+ 'dns': _ips['dns'],
+ 'vip_mail': _ips['vip_mail'],
+ 'vip_calendar': _ips['vip_calendar'],
+ 'vip_files': _ips['vip_files'],
+ 'vip_webdav': _ips['vip_webdav'],
+ }
config['service_configs'] = service_configs
return jsonify(config)
except Exception as e:
@@ -464,6 +474,12 @@ def update_config():
n = len(peer_registry.list_peers())
if n:
all_warnings.append(f'WireGuard endpoint changed — {n} peer(s) must reinstall VPN config')
+ # Keep identity.wireguard_port in sync with service config port
+ if 'port' in config:
+ _id = config_manager.configs.get('_identity', {})
+ _id['wireguard_port'] = config['port']
+ config_manager.configs['_identity'] = _id
+ config_manager._save_all_configs()
# Apply cell identity domain to network and email services
if identity_updates.get('domain'):
@@ -498,8 +514,12 @@ def update_config():
# Write new .env with updated IPs (and current ports) for next container start
env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
ip_utils.write_env_file(new_range, env_file, _collect_service_ports(config_manager.configs))
- # Mark ALL containers as needing restart (IPs affect every container)
- _set_pending_restart([f'ip_range changed to {new_range} — container IPs updated'], ['*'])
+ # Mark ALL containers as needing restart; network_recreate signals that
+ # docker compose down is required before up (Docker can't change subnet in-place)
+ _set_pending_restart(
+ [f'ip_range changed to {new_range} — network will be recreated'],
+ ['*'], network_recreate=True
+ )
# Detect port changes across service configs and identity
# Maps (service_key, field_name) → (port_env_key, [containers])
@@ -518,21 +538,28 @@ def update_config():
port_changed_containers = set()
port_change_messages = []
+ import ip_utils as _ip_utils_pcd
for (svc_key, field), (_env_key, containers) in _PORT_CHANGE_MAP.items():
if svc_key in data and field in data[svc_key]:
- old_val = old_svc_configs.get(svc_key, {}).get(field)
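+            # Compare against the documented default so a first-time save (no value
+            # stored yet) still registers as a change when it differs from the default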
+ default_val = _ip_utils_pcd.PORT_DEFAULTS.get(_env_key)
+ old_val = old_svc_configs.get(svc_key, {}).get(field, default_val)
new_val = data[svc_key][field]
- if old_val is not None and old_val != new_val:
+ if old_val != new_val:
port_changed_containers.update(containers)
port_change_messages.append(
f'{svc_key} {field}: {old_val} → {new_val}'
)
- # wireguard_port in identity also drives WG_PORT env var
+ # wireguard_port in identity also drives WG_PORT env var; sync to service config
if 'wireguard_port' in identity_updates:
- old_wg = old_identity.get('wireguard_port')
+ old_wg = old_identity.get('wireguard_port', _ip_utils_pcd.PORT_DEFAULTS.get('wg_port', 51820))
new_wg = identity_updates['wireguard_port']
- if old_wg is not None and old_wg != new_wg:
+ if old_wg != new_wg:
+ # Sync to wireguard service config and update wg0.conf
+ _wg_svc = config_manager.configs.get('wireguard', {})
+ _wg_svc['port'] = new_wg
+ config_manager.update_service_config('wireguard', _wg_svc)
+ wireguard_manager.update_config({'port': new_wg})
port_changed_containers.add('wireguard')
port_change_messages.append(f'wireguard_port: {old_wg} → {new_wg}')
@@ -585,10 +612,11 @@ def _collect_service_ports(configs: dict) -> dict:
return ports
-def _set_pending_restart(changes: list, containers: list = None):
+def _set_pending_restart(changes: list, containers: list = None, network_recreate: bool = False):
"""Record that specific containers need to be restarted to apply configuration.
containers: list of docker-compose service names, or None/'*' to restart all.
+ network_recreate: True when the Docker bridge subnet changed (requires down+up).
Merges with any existing pending state so multiple changes accumulate.
"""
from datetime import datetime as _dt
@@ -606,13 +634,14 @@ def _set_pending_restart(changes: list, containers: list = None):
'changed_at': _dt.utcnow().isoformat(),
'changes': existing_changes + changes,
'containers': new_containers,
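+        # Sticky: once any queued change requires network recreation, keep the flag
+        # set until the pending state is cleared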
+ 'network_recreate': network_recreate or existing.get('network_recreate', False),
}
config_manager._save_all_configs()
def _clear_pending_restart():
config_manager.configs['_pending_restart'] = {
- 'needs_restart': False, 'changes': [], 'containers': []
+ 'needs_restart': False, 'changes': [], 'containers': [], 'network_recreate': False
}
config_manager._save_all_configs()
@@ -659,24 +688,39 @@ def apply_pending_config():
# Clear pending flag before we restart so it shows cleared after new containers start
_clear_pending_restart()
- # Build compose args: restart all, or only the specific changed containers
- if '*' in containers:
- compose_up_args = ['up', '-d']
- else:
- compose_up_args = ['up', '-d', '--no-deps'] + containers
+ # Check if the IP range (network subnet) is changing — Docker cannot modify an
+ # existing network's subnet in-place, so we need `down` + `up` in that case.
+ needs_network_recreate = pending.get('network_recreate', False)
- # Run docker compose up -d in a background thread; the 0.3s delay lets Flask
- # finish sending this response before cell-api itself gets recreated.
+ if '*' in containers:
+ if needs_network_recreate:
+ # down removes containers AND the bridge network; up recreates everything
+ compose_down_args = ['down']
+ compose_up_args = ['up', '-d']
+ else:
+ compose_down_args = None
+ compose_up_args = ['up', '-d']
+ else:
+ compose_down_args = None
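+        # --no-deps restarts only the named services, without cycling their dependencies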
+ compose_up_args = ['up', '-d', '--no-deps'] + containers
+
+ base_cmd = ['docker', 'compose',
+ '--project-directory', project_dir,
+ '-f', '/app/docker-compose.yml',
+ '--env-file', '/app/.env.compose']
+
+ # Run in a background thread; 0.3 s delay lets Flask send this response first.
def _do_apply():
import time as _time
_time.sleep(0.3)
- result = subprocess.run(
- ['docker', 'compose',
- '--project-directory', project_dir,
- '-f', '/app/docker-compose.yml',
- '--env-file', '/app/.env.compose'] + compose_up_args,
- capture_output=True, text=True, timeout=120
- )
+ if compose_down_args:
+ r = subprocess.run(base_cmd + compose_down_args,
+ capture_output=True, text=True, timeout=60)
+ if r.returncode != 0:
+ logger.error(f"docker compose down failed: {r.stderr.strip()}")
+ return
+ result = subprocess.run(base_cmd + compose_up_args,
+ capture_output=True, text=True, timeout=120)
if result.returncode != 0:
logger.error(f"docker compose up failed: {result.stderr.strip()}")
else:
diff --git a/api/calendar_manager.py b/api/calendar_manager.py
index 60951b6..ee6dd2c 100644
--- a/api/calendar_manager.py
+++ b/api/calendar_manager.py
@@ -478,7 +478,7 @@ class CalendarManager(BaseServiceManager):
f.write(config_content)
def apply_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
- """Update radicale config port and restart cell-radicale."""
+ """Update radicale config file. Port changes go through pending restart (docker binding)."""
restarted = []
warnings = []
if 'port' not in config:
@@ -494,8 +494,8 @@ class CalendarManager(BaseServiceManager):
]
with open(radicale_conf, 'w') as f:
f.writelines(lines)
- self._restart_container('cell-radicale')
- restarted.append('cell-radicale')
+ # No immediate restart — docker port binding must be updated first.
+ # The pending restart banner will run docker compose up with updated .env.
except Exception as e:
warnings.append(f"radicale config update failed: {e}")
return {'restarted': restarted, 'warnings': warnings}
diff --git a/api/wireguard_manager.py b/api/wireguard_manager.py
index 141fdff..4590dac 100644
--- a/api/wireguard_manager.py
+++ b/api/wireguard_manager.py
@@ -225,21 +225,27 @@ class WireGuardManager(BaseServiceManager):
return result
changed = False
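+        # Assume a port-only change until a non-port field (address/private_key) is seen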
+ port_only_change = True
if 'port' in config and config['port']:
lines = _set_iface_field(lines, 'ListenPort', config['port'])
changed = True
if 'address' in config and config['address']:
lines = _set_iface_field(lines, 'Address', config['address'])
changed = True
+ port_only_change = False
if 'private_key' in config and config['private_key']:
lines = _set_iface_field(lines, 'PrivateKey', config['private_key'])
changed = True
+ port_only_change = False
if changed:
with open(cf, 'w') as f:
f.writelines(lines)
- self._restart_container('cell-wireguard')
- restarted.append('cell-wireguard')
+ # Port-only changes: docker binding must be updated first via pending restart.
+ # Non-port changes (address, private_key) can restart immediately.
+ if not port_only_change:
+ self._restart_container('cell-wireguard')
+ restarted.append('cell-wireguard')
except Exception as e:
warnings.append(f"wg0.conf update failed: {e}")
logger.error(f"apply_config error: {e}")
diff --git a/docker-compose.yml b/docker-compose.yml
index 60c2615..99ca914 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -167,7 +167,7 @@ services:
- PUID=${PUID:-1000}
- PGID=${PGID:-1000}
ports:
- - "${WG_PORT:-51820}:51820/udp"
+ - "${WG_PORT:-51820}:${WG_PORT:-51820}/udp"
volumes:
- ./config/wireguard:/config
- /lib/modules:/lib/modules
diff --git a/tests/test_pending_restart.py b/tests/test_pending_restart.py
index cab4cf1..4fbe252 100644
--- a/tests/test_pending_restart.py
+++ b/tests/test_pending_restart.py
@@ -209,5 +209,152 @@ class TestCancelPendingEndpoint(unittest.TestCase):
self.assertEqual(data['changes'], [])
+class TestPortChangeDetection(unittest.TestCase):
+ """Test that port changes always trigger pending restart, even on first save."""
+
+ def setUp(self):
+ app.config['TESTING'] = True
+ self.client = app.test_client()
+ _clear_pending_restart()
+ # Remove any stored service configs so we start clean
+ for key in ('calendar', 'files', 'wireguard', 'network', 'email'):
+ config_manager.configs.pop(key, None)
+ self.tmp = tempfile.mkdtemp()
+ self.env_path = os.path.join(self.tmp, '.env')
+
+ def tearDown(self):
+ _clear_pending_restart()
+ for key in ('calendar', 'files', 'wireguard', 'network', 'email'):
+ config_manager.configs.pop(key, None)
+ shutil.rmtree(self.tmp)
+
+ def _put_config(self, payload):
+ return self.client.put('/api/config',
+ data=json.dumps(payload),
+ content_type='application/json')
+
+ def test_calendar_port_first_save_marks_pending(self):
+ """First-time calendar port save should still queue pending restart."""
+ r = self._put_config({'calendar': {'port': 5233}})
+ self.assertEqual(r.status_code, 200)
+ p = config_manager.configs.get('_pending_restart', {})
+ self.assertTrue(p.get('needs_restart'), 'pending restart not set on first calendar port save')
+ self.assertIn('radicale', p.get('containers', []))
+
+ def test_files_port_first_save_marks_pending(self):
+ """First-time files (webdav) port save should queue pending restart."""
+ r = self._put_config({'files': {'port': 8181}})
+ self.assertEqual(r.status_code, 200)
+ p = config_manager.configs.get('_pending_restart', {})
+ self.assertTrue(p.get('needs_restart'))
+ self.assertIn('webdav', p.get('containers', []))
+
+ def test_files_manager_port_first_save_marks_pending(self):
+ r = self._put_config({'files': {'manager_port': 9090}})
+ self.assertEqual(r.status_code, 200)
+ p = config_manager.configs.get('_pending_restart', {})
+ self.assertTrue(p.get('needs_restart'))
+ self.assertIn('filegator', p.get('containers', []))
+
+ def test_multiple_service_port_changes_accumulate_containers(self):
+ """Saving two services should accumulate both containers in pending."""
+ self._put_config({'calendar': {'port': 5233}})
+ self._put_config({'files': {'port': 8181}})
+ p = config_manager.configs.get('_pending_restart', {})
+ self.assertTrue(p.get('needs_restart'))
+ containers = p.get('containers', [])
+ self.assertIn('radicale', containers)
+ self.assertIn('webdav', containers)
+
+ def test_same_port_as_default_no_pending(self):
+ """Saving the default port value should NOT trigger pending restart."""
+ r = self._put_config({'calendar': {'port': 5232}}) # 5232 is default
+ self.assertEqual(r.status_code, 200)
+ p = config_manager.configs.get('_pending_restart', {})
+ self.assertFalse(p.get('needs_restart', False))
+
+
+class TestEnvFileWrittenOnPortChange(unittest.TestCase):
+ """Verify that PUT /api/config with a port change actually writes the new
+ port variable to the .env file consumed by docker compose.
+
+ This is the critical link between 'port saved in config' and 'docker binding
+ changes on next restart'. Without this the container would still bind the old
+ port even after apply is clicked.
+ """
+
+ def setUp(self):
+ app.config['TESTING'] = True
+ self.client = app.test_client()
+ _clear_pending_restart()
+ for key in ('calendar', 'files', 'wireguard', 'network', 'email'):
+ config_manager.configs.pop(key, None)
+ self.tmp = tempfile.mkdtemp()
+ self.env_path = os.path.join(self.tmp, '.env')
+ # Pre-create .env so write_env_file can overwrite it
+ import ip_utils
+ ip_utils.write_env_file('172.20.0.0/16', self.env_path)
+
+ def tearDown(self):
+ _clear_pending_restart()
+ for key in ('calendar', 'files', 'wireguard', 'network', 'email'):
+ config_manager.configs.pop(key, None)
+ shutil.rmtree(self.tmp)
+
+ def _put_config(self, payload):
+ with patch.dict(os.environ, {'COMPOSE_ENV_FILE': self.env_path}):
+ return self.client.put('/api/config',
+ data=json.dumps(payload),
+ content_type='application/json')
+
+ def _env_content(self):
+ return open(self.env_path).read()
+
+ def test_calendar_port_written_to_env(self):
+ self._put_config({'calendar': {'port': 5299}})
+ self.assertIn('RADICALE_PORT=5299', self._env_content())
+
+ def test_webdav_port_written_to_env(self):
+ self._put_config({'files': {'port': 8181}})
+ self.assertIn('WEBDAV_PORT=8181', self._env_content())
+
+ def test_filegator_port_written_to_env(self):
+ self._put_config({'files': {'manager_port': 9090}})
+ self.assertIn('FILEGATOR_PORT=9090', self._env_content())
+
+ def test_wireguard_port_written_to_env(self):
+ self._put_config({'wireguard': {'port': 51999}})
+ self.assertIn('WG_PORT=51999', self._env_content())
+
+ def test_email_smtp_port_written_to_env(self):
+ self._put_config({'email': {'smtp_port': 2525}})
+ self.assertIn('MAIL_SMTP_PORT=2525', self._env_content())
+
+ def test_other_ports_unchanged_when_one_port_changes(self):
+ """Changing calendar port must not reset unrelated ports to defaults."""
+ # First set webdav to a non-default
+ self._put_config({'files': {'port': 8181}})
+ # Then change calendar port
+ self._put_config({'calendar': {'port': 5299}})
+ content = self._env_content()
+ self.assertIn('RADICALE_PORT=5299', content)
+ self.assertIn('WEBDAV_PORT=8181', content) # must stay at 8181, not revert to 8080
+
+ def test_env_uses_symmetric_wg_port_for_docker_binding(self):
+ """WG_PORT must be the same value on both sides of the docker port mapping.
+
+ docker-compose.yml uses ${WG_PORT:-51820}:${WG_PORT:-51820}/udp so the
+ host port and container port are always the same. This test verifies
+ a port change writes a single consistent value to .env so the daemon's
+ ListenPort matches the Docker binding.
+ """
+ self._put_config({'wireguard': {'port': 51999}})
+ content = self._env_content()
+ self.assertIn('WG_PORT=51999', content)
+ # There must be only one WG_PORT line (no duplicate with old value)
+        wg_lines = [line for line in content.splitlines() if line.startswith('WG_PORT=')]
+ self.assertEqual(len(wg_lines), 1, f'Expected exactly one WG_PORT line, got: {wg_lines}')
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/webui/src/App.jsx b/webui/src/App.jsx
index 1d3299c..b81f7eb 100644
--- a/webui/src/App.jsx
+++ b/webui/src/App.jsx
@@ -164,9 +164,39 @@ function App() {
};
}, [checkHealth, checkPending]);
+ const [applyStatus, setApplyStatus] = useState(null); // null | 'restarting' | 'done' | 'timeout' | 'error'
+ const [applyError, setApplyError] = useState('');
+
const handleApply = useCallback(async () => {
- await cellAPI.applyPending();
+ setApplyError('');
+ try {
+ await cellAPI.applyPending();
+ } catch (err) {
+ setApplyStatus('error');
+ setApplyError(err?.response?.data?.error || 'Apply request failed');
+ setTimeout(() => setApplyStatus(null), 6000);
+ return;
+ }
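+    // Optimistically hide the banner; the backend clears its pending state
+    // before kicking off the restart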
setPending({ needs_restart: false, changes: [] });
+ setApplyStatus('restarting');
+
+ // Poll health until API responds again (max 45 s; it may briefly drop if cell-api restarts)
+ const deadline = Date.now() + 45000;
+ while (Date.now() < deadline) {
+ await new Promise(r => setTimeout(r, 2000));
+ try {
+ await healthAPI.check();
+ setIsOnline(true);
+ setApplyStatus('done');
+ setTimeout(() => setApplyStatus(null), 4000);
+ return;
+ } catch {
+ setIsOnline(false);
+ }
+ }
+ setApplyStatus('timeout');
+ setApplyError('Containers may still be starting — check docker logs if services are unavailable');
+ setTimeout(() => setApplyStatus(null), 8000);
}, []);
const handleCancel = useCallback(async () => {
@@ -231,10 +261,31 @@ function App() {
)}
- {isOnline && pending.needs_restart && (
+ {isOnline && pending.needs_restart && !applyStatus && (
-            Requires VPN connection. DNS server must be set to 172.20.0.3.
+            Requires VPN connection. DNS server must be set to {dnsIp}.
diff --git a/webui/src/pages/Email.jsx b/webui/src/pages/Email.jsx
index 41852d3..97b978b 100644
--- a/webui/src/pages/Email.jsx
+++ b/webui/src/pages/Email.jsx
@@ -3,7 +3,6 @@ import { Mail, Users, Wifi, Copy, CheckCheck } from 'lucide-react';
 import { emailAPI } from '../services/api';
 import { useConfig } from '../contexts/ConfigContext';
 
-const CELL_IP = '172.20.0.23';
 
 function CopyButton({ text }) {
   const [copied, setCopied] = useState(false);
@@ -32,8 +31,14 @@ function InfoRow({ label, value }) {
 }
 
 function Email() {
-  const { domain = 'cell' } = useConfig();
+  const { domain = 'cell', service_ips = {}, service_configs = {} } = useConfig();
   const cellHost = `mail.${domain}`;
+  const emailCfg = service_configs.email || {};
+  const mailIp = service_ips.vip_mail || '172.20.0.23';
+  const dnsIp = service_ips.dns || '172.20.0.3';
+  const imapPort = emailCfg.imap_port ?? 993;
+  const smtpPort = emailCfg.smtp_port ?? 25;
+  const webmailPort = emailCfg.webmail_port ?? 8888;
   const [users, setUsers] = useState([]);
   const [status, setStatus] = useState(null);
   const [isLoading, setIsLoading] = useState(true);
@@ -81,9 +86,9 @@ function Email() {
-          Requires VPN + DNS set to 172.20.0.3.
+          Requires VPN + DNS set to {dnsIp}.
diff --git a/webui/src/pages/Files.jsx b/webui/src/pages/Files.jsx
index 4fdd0f3..aaeea44 100644
--- a/webui/src/pages/Files.jsx
+++ b/webui/src/pages/Files.jsx
@@ -3,8 +3,6 @@ import { FolderOpen, Users, HardDrive, Wifi, Copy, CheckCheck } from 'lucide-react';
 import { fileAPI } from '../services/api';
 import { useConfig } from '../contexts/ConfigContext';
 
-const FILES_IP = '172.20.0.22';
-const WEBDAV_IP = '172.20.0.24';
 
 function CopyButton({ text }) {
   const [copied, setCopied] = useState(false);
@@ -33,9 +31,14 @@ function InfoRow({ label, value }) {
 }
 
 function Files() {
-  const { domain = 'cell' } = useConfig();
-  const filesHost = `files.${domain}`;
-  const webdavHost = `webdav.${domain}`;
+  const { domain = 'cell', service_ips = {}, service_configs = {} } = useConfig();
+  const filesHost = `files.${domain}`;
+  const webdavHost = `webdav.${domain}`;
+  const filesIp = service_ips.vip_files || '172.20.0.22';
+  const webdavIp = service_ips.vip_webdav || '172.20.0.24';
+  const filesCfg = service_configs.files || {};
+  const webdavPort = filesCfg.port ?? 8080;
+  const filegatorPort = filesCfg.manager_port ?? 8082;
   const [users, setUsers] = useState([]);
   const [status, setStatus] = useState(null);
   const [isLoading, setIsLoading] = useState(true);
@@ -83,8 +86,8 @@ function Files() {
         Browser-based file manager. Requires VPN.
@@ -99,8 +102,8 @@ function Files() {
diff --git a/webui/src/pages/Settings.jsx b/webui/src/pages/Settings.jsx
index eaeac9b..7223b9f 100644
--- a/webui/src/pages/Settings.jsx
+++ b/webui/src/pages/Settings.jsx
@@ -287,7 +287,7 @@ function Settings() {
const { refresh: refreshConfig } = useConfig();
// identity
- const [identity, setIdentity] = useState({ cell_name: '', domain: '', ip_range: '', wireguard_port: 51820 });
+ const [identity, setIdentity] = useState({ cell_name: '', domain: '', ip_range: '' });
const [identityDirty, setIdentityDirty] = useState(false);
const [identitySaving, setIdentitySaving] = useState(false);
@@ -315,7 +315,6 @@ function Settings() {
cell_name: cfg.cell_name || '',
domain: cfg.domain || '',
ip_range: cfg.ip_range || '',
- wireguard_port: cfg.wireguard_port || 51820,
});
setServiceConfigs(cfg.service_configs || {});
setBackups(bkRes.data || []);
@@ -360,6 +359,7 @@ function Settings() {
const res = await cellAPI.updateConfig({ [key]: serviceConfigs[key] });
setServiceDirty((d) => ({ ...d, [key]: false }));
_applyResult(res, key);
+ refreshConfig();
} catch {
toast(`Failed to save ${key} config`, 'error');
} finally {
@@ -482,13 +482,6 @@ function Settings() {
placeholder="172.20.0.0/16"
/>
-          UDP Port {serverConfig?.port || 51820}
+          UDP Port {configPort ?? serverConfig?.port ?? 51820}
{serverConfig ? (