Merge branch 'feature/pending-restart' into 'main'

feat: replace hardcoded docker-compose IPs with .env-based substitution

See merge request root/pic!2
This commit is contained in:
Administrator
2026-04-22 15:35:14 +00:00
7 changed files with 333 additions and 149 deletions
+89 -19
View File
@@ -481,33 +481,21 @@ def update_config():
if identity_updates.get('ip_range'): if identity_updates.get('ip_range'):
import ip_utils import ip_utils
new_range = identity_updates['ip_range'] new_range = identity_updates['ip_range']
old_range = old_identity.get('ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16'))
cur_identity = config_manager.configs.get('_identity', {}) cur_identity = config_manager.configs.get('_identity', {})
cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell')) cur_cell_name = cur_identity.get('cell_name', os.environ.get('CELL_NAME', 'mycell'))
cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell')) cur_domain = cur_identity.get('domain', os.environ.get('CELL_DOMAIN', 'cell'))
# Update DNS zone records # Update DNS zone records immediately
ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain) ip_result = network_manager.apply_ip_range(new_range, cur_cell_name, cur_domain)
all_restarted.extend(ip_result.get('restarted', [])) all_restarted.extend(ip_result.get('restarted', []))
all_warnings.extend(ip_result.get('warnings', [])) all_warnings.extend(ip_result.get('warnings', []))
# Update firewall virtual IPs (iptables) and Caddy virtual IPs # Update firewall virtual IPs (iptables) and Caddy virtual IPs immediately
firewall_manager.update_service_ips(new_range) firewall_manager.update_service_ips(new_range)
firewall_manager.ensure_caddy_virtual_ips() firewall_manager.ensure_caddy_virtual_ips()
# Try to update docker-compose.yml (only works outside container / dev mode) # Write new .env so docker-compose picks up new container IPs on next start
compose_candidates = [ env_file = os.environ.get('COMPOSE_ENV_FILE', '/app/.env.compose')
os.environ.get('COMPOSE_FILE', ''), ip_utils.write_env_file(new_range, env_file)
'/app/../docker-compose.yml', # Mark containers as needing restart
os.path.join(os.path.dirname(__file__), '..', 'docker-compose.yml'), _set_pending_restart([f'ip_range changed to {new_range} — container IPs updated'])
]
compose_updated = False
for cpath in compose_candidates:
if cpath and ip_utils.update_docker_compose_ips(old_range, new_range, cpath):
all_warnings.append(
'docker-compose.yml updated — run `make restart` to apply container IP changes')
compose_updated = True
break
if not compose_updated:
all_warnings.append(
'docker-compose.yml not updated (run `make reinstall` to apply container IP changes)')
logger.info(f"Updated config, restarted: {all_restarted}") logger.info(f"Updated config, restarted: {all_restarted}")
return jsonify({ return jsonify({
@@ -519,6 +507,88 @@ def update_config():
logger.error(f"Error updating config: {e}") logger.error(f"Error updating config: {e}")
return jsonify({"error": str(e)}), 500 return jsonify({"error": str(e)}), 500
# ---------------------------------------------------------------------------
# Pending-restart helpers
# ---------------------------------------------------------------------------
def _set_pending_restart(changes: list):
    """Record that containers need to be restarted to apply configuration.

    Persists a ``_pending_restart`` entry through config_manager so the flag
    survives a restart of the api container and can be polled by the UI via
    GET /api/config/pending.

    Args:
        changes: Human-readable descriptions of the configuration changes
            that require a container restart.
    """
    from datetime import datetime as _dt, timezone as _tz
    config_manager.configs['_pending_restart'] = {
        'needs_restart': True,
        # Timezone-aware UTC timestamp. datetime.utcnow() is deprecated
        # (Python 3.12+) and returned a naive datetime; now(timezone.utc)
        # yields an unambiguous ISO string with an explicit +00:00 offset.
        'changed_at': _dt.now(_tz.utc).isoformat(),
        'changes': changes,
    }
    config_manager._save_all_configs()
def _clear_pending_restart():
    """Reset the pending-restart flag once changes have been applied."""
    cleared_state = {'needs_restart': False, 'changes': []}
    config_manager.configs['_pending_restart'] = cleared_state
    config_manager._save_all_configs()
@app.route('/api/config/pending', methods=['GET'])
def get_pending_config():
    """Return whether there are unapplied configuration changes that require a restart."""
    state = config_manager.configs.get('_pending_restart', {})
    payload = {
        'needs_restart': state.get('needs_restart', False),
        'changed_at': state.get('changed_at'),
        'changes': state.get('changes', []),
    }
    return jsonify(payload)
@app.route('/api/config/apply', methods=['POST'])
def apply_pending_config():
    """Apply pending configuration by restarting containers via docker compose up -d.

    Responds immediately; the actual restart runs in a background thread so
    Flask can finish sending the response before the api container itself is
    recreated by compose.

    Returns:
        JSON with ``restart_in_progress: True`` when a restart was kicked off,
        a no-op message when nothing is pending, or a 500 with the error text.
    """
    try:
        pending = config_manager.configs.get('_pending_restart', {})
        if not pending.get('needs_restart'):
            return jsonify({'message': 'No pending changes to apply'})

        # Get project working dir from our own container labels (set by docker-compose)
        project_dir = '/home/roof/pic'
        try:
            import docker as _docker_sdk
            _client = _docker_sdk.from_env()
            _self = _client.containers.get('cell-api')
            project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir)
        except Exception:
            # Best effort: fall back to the default path when the docker SDK
            # or the cell-api container lookup is unavailable.
            pass

        # Keep a copy of the change list so the flag can be restored if the
        # compose run fails after we optimistically clear it below.
        pending_changes = list(pending.get('changes', []))

        # Clear pending flag before we restart so it shows cleared after the new container starts
        _clear_pending_restart()

        # Run docker compose up -d in a background thread; the 0.3s delay lets Flask
        # finish sending this response before cell-api itself gets recreated.
        def _do_apply():
            import time as _time
            _time.sleep(0.3)
            try:
                result = subprocess.run(
                    ['docker', 'compose',
                     '--project-directory', project_dir,
                     '-f', '/app/docker-compose.yml',
                     '--env-file', '/app/.env.compose',
                     'up', '-d'],
                    capture_output=True, text=True, timeout=120
                )
            except Exception as exc:
                # subprocess.run can raise (e.g. TimeoutExpired, FileNotFoundError);
                # previously this killed the thread with the flag already cleared.
                logger.error(f"docker compose up raised: {exc}")
                _set_pending_restart(pending_changes)
                return
            if result.returncode != 0:
                logger.error(f"docker compose up failed: {result.stderr.strip()}")
                # Bug fix: the flag was cleared optimistically above; restore it
                # so a failed apply is not silently reported as applied.
                _set_pending_restart(pending_changes)
            else:
                logger.info('docker compose up -d completed successfully')

        threading.Thread(target=_do_apply, daemon=False).start()
        return jsonify({
            'message': 'Applying configuration — containers are restarting',
            'restart_in_progress': True,
        })
    except Exception as e:
        logger.error(f"Error applying config: {e}")
        return jsonify({'error': str(e)}), 500
# Configuration management endpoints # Configuration management endpoints
@app.route('/api/config/backup', methods=['POST']) @app.route('/api/config/backup', methods=['POST'])
def create_config_backup(): def create_config_backup():
+34 -31
View File
@@ -2,11 +2,13 @@
""" """
IP utility functions for PIC derive all container and virtual IPs from the IP utility functions for PIC derive all container and virtual IPs from the
docker network subnet so that one ip_range setting drives everything. docker network subnet so that one ip_range setting drives everything.
The canonical source of IPs is the .env file at the project root.
docker-compose.yml uses ${VAR:-default} substitution to read from it.
""" """
import ipaddress import ipaddress
import os import os
import re
from typing import Dict from typing import Dict
# Fixed host-number offsets within the subnet (e.g. 172.20.0.0/16 → 172.20.0.<offset>) # Fixed host-number offsets within the subnet (e.g. 172.20.0.0/16 → 172.20.0.<offset>)
@@ -30,6 +32,22 @@ CONTAINER_OFFSETS: Dict[str, int] = {
'vip_webdav': 24, 'vip_webdav': 24,
} }
# Mapping from service key → docker-compose env var name (static containers only)
ENV_VAR_NAMES: Dict[str, str] = {
'caddy': 'CADDY_IP',
'dns': 'DNS_IP',
'dhcp': 'DHCP_IP',
'ntp': 'NTP_IP',
'mail': 'MAIL_IP',
'radicale': 'RADICALE_IP',
'webdav': 'WEBDAV_IP',
'wireguard': 'WG_IP',
'api': 'API_IP',
'webui': 'WEBUI_IP',
'rainloop': 'RAINLOOP_IP',
'filegator': 'FILEGATOR_IP',
}
def get_service_ips(ip_range: str) -> Dict[str, str]: def get_service_ips(ip_range: str) -> Dict[str, str]:
""" """
@@ -60,40 +78,25 @@ def get_virtual_ips(ip_range: str) -> Dict[str, str]:
} }
def update_docker_compose_ips(old_ip_range: str, new_ip_range: str, compose_path: str) -> bool: def write_env_file(ip_range: str, path: str) -> bool:
""" """
Rewrite docker-compose.yml: replace the subnet declaration and every Write (or overwrite) the docker-compose .env file with IPs derived from ip_range.
container ipv4_address that derives from old_ip_range with the new values.
Returns True on success, False if the file is not accessible. docker-compose reads this file automatically at startup to substitute
${VAR:-default} placeholders in docker-compose.yml. Call this at setup
time and whenever ip_range changes so containers get the right IPs on
the next `docker-compose up -d`.
Returns True on success, False if the path is not writable.
""" """
if not os.path.exists(compose_path):
return False
try: try:
old_ips = get_service_ips(old_ip_range) ips = get_service_ips(ip_range)
new_ips = get_service_ips(new_ip_range) lines = [f'CELL_NETWORK={ip_range}\n']
for svc, var in ENV_VAR_NAMES.items():
with open(compose_path) as f: lines.append(f'{var}={ips[svc]}\n')
content = f.read() os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'w') as f:
# Replace subnet string (e.g. "172.20.0.0/16") f.writelines(lines)
content = content.replace(old_ip_range, new_ip_range)
# Replace each container IP (avoid touching VIPs — they're not in compose)
static_names = [n for n in CONTAINER_OFFSETS if not n.startswith('vip_')]
for name in static_names:
old_ip = old_ips[name]
new_ip = new_ips[name]
if old_ip != new_ip:
# Replace only full IP occurrences (word-boundary aware via regex)
content = re.sub(
r'(?<!\d)' + re.escape(old_ip) + r'(?!\d)',
new_ip,
content,
)
with open(compose_path, 'w') as f:
f.write(content)
return True return True
except Exception: except Exception:
return False return False
+15 -13
View File
@@ -17,7 +17,7 @@ services:
- NET_ADMIN - NET_ADMIN
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.2 ipv4_address: ${CADDY_IP:-172.20.0.2}
logging: logging:
driver: json-file driver: json-file
options: options:
@@ -38,7 +38,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.3 ipv4_address: ${DNS_IP:-172.20.0.3}
logging: logging:
driver: json-file driver: json-file
options: options:
@@ -57,7 +57,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.4 ipv4_address: ${DHCP_IP:-172.20.0.4}
command: ["/bin/sh", "-c", "apk add --no-cache dnsmasq && dnsmasq -d -C /etc/dnsmasq.conf"] command: ["/bin/sh", "-c", "apk add --no-cache dnsmasq && dnsmasq -d -C /etc/dnsmasq.conf"]
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
@@ -78,7 +78,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.5 ipv4_address: ${NTP_IP:-172.20.0.5}
cap_add: cap_add:
- SYS_TIME - SYS_TIME
command: ["/bin/sh", "-c", "apk add --no-cache chrony && rm -f /var/run/chrony/chronyd.pid && exec chronyd -d -f /etc/chrony/chrony.conf -n"] command: ["/bin/sh", "-c", "apk add --no-cache chrony && rm -f /var/run/chrony/chronyd.pid && exec chronyd -d -f /etc/chrony/chrony.conf -n"]
@@ -108,7 +108,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.6 ipv4_address: ${MAIL_IP:-172.20.0.6}
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
logging: logging:
@@ -129,7 +129,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.7 ipv4_address: ${RADICALE_IP:-172.20.0.7}
logging: logging:
driver: json-file driver: json-file
options: options:
@@ -151,7 +151,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.8 ipv4_address: ${WEBDAV_IP:-172.20.0.8}
logging: logging:
driver: json-file driver: json-file
options: options:
@@ -174,7 +174,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.9 ipv4_address: ${WG_IP:-172.20.0.9}
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
- SYS_MODULE - SYS_MODULE
@@ -201,11 +201,13 @@ services:
- ./config/dns:/app/config/dns - ./config/dns:/app/config/dns
- ./data/logs:/app/api/data/logs - ./data/logs:/app/api/data/logs
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock
- ./.env:/app/.env.compose
- ./docker-compose.yml:/app/docker-compose.yml:ro
pid: host pid: host
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.10 ipv4_address: ${API_IP:-172.20.0.10}
depends_on: depends_on:
- wireguard - wireguard
- dns - dns
@@ -224,7 +226,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.11 ipv4_address: ${WEBUI_IP:-172.20.0.11}
logging: logging:
driver: json-file driver: json-file
options: options:
@@ -238,7 +240,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.12 ipv4_address: ${RAINLOOP_IP:-172.20.0.12}
ports: ports:
- "8888:8888" - "8888:8888"
volumes: volumes:
@@ -256,7 +258,7 @@ services:
restart: unless-stopped restart: unless-stopped
networks: networks:
cell-network: cell-network:
ipv4_address: 172.20.0.13 ipv4_address: ${FILEGATOR_IP:-172.20.0.13}
ports: ports:
- "8082:8080" - "8082:8080"
volumes: volumes:
@@ -272,4 +274,4 @@ networks:
driver: bridge driver: bridge
ipam: ipam:
config: config:
- subnet: 172.20.0.0/16 - subnet: ${CELL_NETWORK:-172.20.0.0/16}
+13
View File
@@ -193,11 +193,23 @@ def write_cell_config(cell_name: str, domain: str, port: int):
print(f'[CREATED] config/api/cell_config.json name={cell_name} domain={domain}') print(f'[CREATED] config/api/cell_config.json name={cell_name} domain={domain}')
def write_compose_env(ip_range: str):
    """Generate .env at project root so docker-compose picks up correct container IPs.

    Args:
        ip_range: CIDR subnet (e.g. '172.20.0.0/16') that all container IPs
            are derived from by ip_utils.
    """
    # ip_utils lives under api/; make it importable from this setup script.
    sys.path.insert(0, os.path.join(ROOT, 'api'))
    import ip_utils
    env_path = os.path.join(ROOT, '.env')
    if ip_utils.write_env_file(ip_range, env_path):
        print(f'[CREATED] .env (ip_range={ip_range})')
    else:
        # Plain string: the original used an f-string with no placeholders (F541).
        print('[WARN] Could not write .env — containers will use built-in default IPs')
def main(): def main():
cell_name = os.environ.get('CELL_NAME', 'mycell') cell_name = os.environ.get('CELL_NAME', 'mycell')
domain = os.environ.get('CELL_DOMAIN', 'cell') domain = os.environ.get('CELL_DOMAIN', 'cell')
vpn_address = os.environ.get('VPN_ADDRESS', '10.0.0.1/24') vpn_address = os.environ.get('VPN_ADDRESS', '10.0.0.1/24')
wg_port = int(os.environ.get('WG_PORT', '51820')) wg_port = int(os.environ.get('WG_PORT', '51820'))
ip_range = os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')
print('--- Personal Internet Cell: Setup ---') print('--- Personal Internet Cell: Setup ---')
print(f' cell={cell_name} domain={domain} vpn={vpn_address} port={wg_port}') print(f' cell={cell_name} domain={domain} vpn={vpn_address} port={wg_port}')
@@ -212,6 +224,7 @@ def main():
priv, _pub = generate_wg_keys() priv, _pub = generate_wg_keys()
write_wg0_conf(priv, vpn_address, wg_port) write_wg0_conf(priv, vpn_address, wg_port)
write_cell_config(cell_name, domain, wg_port) write_cell_config(cell_name, domain, wg_port)
write_compose_env(ip_range)
print() print()
print('--- Setup complete! Run: make start ---') print('--- Setup complete! Run: make start ---')
+53 -58
View File
@@ -80,72 +80,67 @@ class TestGetVirtualIps(unittest.TestCase):
self.assertEqual(vips['webdav'], '10.10.0.24') self.assertEqual(vips['webdav'], '10.10.0.24')
class TestUpdateDockerComposeIps(unittest.TestCase): class TestWriteEnvFile(unittest.TestCase):
COMPOSE_TEMPLATE = """\
version: '3.3'
services:
caddy:
networks:
cell-network:
ipv4_address: 172.20.0.2
dns:
networks:
cell-network:
ipv4_address: 172.20.0.3
api:
networks:
cell-network:
ipv4_address: 172.20.0.10
networks:
cell-network:
ipam:
config:
- subnet: 172.20.0.0/16
"""
def setUp(self): def setUp(self):
self.tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) self.tmp = tempfile.mkdtemp()
self.tmp.write(self.COMPOSE_TEMPLATE) self.env_path = os.path.join(self.tmp, '.env')
self.tmp.close()
def tearDown(self): def tearDown(self):
os.unlink(self.tmp.name) import shutil
shutil.rmtree(self.tmp)
def test_returns_false_for_missing_file(self): def test_creates_file(self):
self.assertFalse( ip_utils.write_env_file('172.20.0.0/16', self.env_path)
ip_utils.update_docker_compose_ips('172.20.0.0/16', '10.0.0.0/24', '/nonexistent/path.yml') self.assertTrue(os.path.exists(self.env_path))
)
def test_subnet_updated(self):
ip_utils.update_docker_compose_ips('172.20.0.0/16', '10.0.0.0/24', self.tmp.name)
with open(self.tmp.name) as f:
content = f.read()
self.assertIn('10.0.0.0/24', content)
self.assertNotIn('172.20.0.0/16', content)
def test_caddy_ip_updated(self):
ip_utils.update_docker_compose_ips('172.20.0.0/16', '10.0.0.0/24', self.tmp.name)
with open(self.tmp.name) as f:
content = f.read()
self.assertIn('10.0.0.2', content)
self.assertNotIn('172.20.0.2', content)
def test_api_ip_updated(self):
ip_utils.update_docker_compose_ips('172.20.0.0/16', '10.0.0.0/24', self.tmp.name)
with open(self.tmp.name) as f:
content = f.read()
self.assertIn('10.0.0.10', content)
self.assertNotIn('172.20.0.10', content)
def test_returns_true_on_success(self): def test_returns_true_on_success(self):
result = ip_utils.update_docker_compose_ips('172.20.0.0/16', '10.0.0.0/24', self.tmp.name) result = ip_utils.write_env_file('172.20.0.0/16', self.env_path)
self.assertTrue(result) self.assertTrue(result)
def test_noop_when_ranges_same(self): def test_returns_false_on_unwritable_path(self):
ip_utils.update_docker_compose_ips('172.20.0.0/16', '172.20.0.0/16', self.tmp.name) result = ip_utils.write_env_file('172.20.0.0/16', '/nonexistent/deep/path/.env')
with open(self.tmp.name) as f: self.assertFalse(result)
content = f.read()
self.assertEqual(content, self.COMPOSE_TEMPLATE) def test_contains_cell_network(self):
ip_utils.write_env_file('172.20.0.0/16', self.env_path)
content = open(self.env_path).read()
self.assertIn('CELL_NETWORK=172.20.0.0/16', content)
def test_contains_caddy_ip(self):
ip_utils.write_env_file('172.20.0.0/16', self.env_path)
content = open(self.env_path).read()
self.assertIn('CADDY_IP=172.20.0.2', content)
def test_contains_all_env_vars(self):
ip_utils.write_env_file('172.20.0.0/16', self.env_path)
content = open(self.env_path).read()
for var in ip_utils.ENV_VAR_NAMES.values():
self.assertIn(var + '=', content, f'{var} missing from .env')
def test_custom_subnet_generates_correct_ips(self):
ip_utils.write_env_file('10.5.0.0/24', self.env_path)
content = open(self.env_path).read()
self.assertIn('CELL_NETWORK=10.5.0.0/24', content)
self.assertIn('CADDY_IP=10.5.0.2', content)
self.assertIn('DNS_IP=10.5.0.3', content)
self.assertIn('API_IP=10.5.0.10', content)
self.assertNotIn('172.20', content)
def test_overwrite_updates_ips(self):
ip_utils.write_env_file('172.20.0.0/16', self.env_path)
ip_utils.write_env_file('10.0.0.0/24', self.env_path)
content = open(self.env_path).read()
self.assertIn('CADDY_IP=10.0.0.2', content)
self.assertNotIn('172.20', content)
def test_each_line_is_key_equals_value(self):
ip_utils.write_env_file('172.20.0.0/16', self.env_path)
for line in open(self.env_path).read().splitlines():
if line.strip():
self.assertIn('=', line, f'Bad line format: {line!r}')
key, _, val = line.partition('=')
self.assertTrue(key.isupper() or '_' in key)
self.assertTrue(val.strip())
if __name__ == '__main__': if __name__ == '__main__':
+116 -17
View File
@@ -1,5 +1,5 @@
import { BrowserRouter as Router, Routes, Route } from 'react-router-dom'; import { BrowserRouter as Router, Routes, Route } from 'react-router-dom';
import { useState, useEffect } from 'react'; import { useState, useEffect, useCallback } from 'react';
import { import {
Home, Home,
Users, Users,
@@ -14,9 +14,11 @@ import {
Key, Key,
Package2, Package2,
Settings as SettingsIcon, Settings as SettingsIcon,
Link2 Link2,
RefreshCw,
AlertTriangle,
} from 'lucide-react'; } from 'lucide-react';
import { healthAPI } from './services/api'; import { healthAPI, cellAPI } from './services/api';
import { ConfigProvider } from './contexts/ConfigContext'; import { ConfigProvider } from './contexts/ConfigContext';
import Sidebar from './components/Sidebar'; import Sidebar from './components/Sidebar';
import Dashboard from './pages/Dashboard'; import Dashboard from './pages/Dashboard';
@@ -33,27 +35,120 @@ import Vault from './pages/Vault';
import ContainerDashboard from './components/ContainerDashboard'; import ContainerDashboard from './components/ContainerDashboard';
import CellNetwork from './pages/CellNetwork'; import CellNetwork from './pages/CellNetwork';
// Warning banner shown when the backend reports unapplied configuration
// changes that require a container restart. Clicking "Apply Now" opens a
// confirmation modal; confirming invokes `onApply`.
//
// Props:
//   pending - pending-restart state from the API
//             ({ needs_restart, changed_at, changes }).
//   onApply - async callback that triggers the restart.
function PendingRestartBanner({ pending, onApply }) {
  // Controls visibility of the confirmation modal.
  const [confirming, setConfirming] = useState(false);
  // True while the apply request is in flight; disables the button and spins the icon.
  const [applying, setApplying] = useState(false);

  const handleApply = async () => {
    setApplying(true);
    setConfirming(false);
    try {
      await onApply();
    } finally {
      // Reset the spinner even if onApply rejects.
      setApplying(false);
    }
  };

  return (
    <>
      {/* Inline banner listing the pending changes. */}
      <div className="mb-6 bg-warning-50 border border-warning-300 rounded-lg p-4">
        <div className="flex items-center justify-between">
          <div className="flex items-start gap-3">
            <AlertTriangle className="h-5 w-5 text-warning-500 mt-0.5 flex-shrink-0" />
            <div>
              <p className="text-sm font-medium text-warning-800">
                Configuration changes pending containers need restart
              </p>
              {pending.changes?.length > 0 && (
                <ul className="mt-1 text-xs text-warning-700 list-disc list-inside">
                  {pending.changes.map((c, i) => <li key={i}>{c}</li>)}
                </ul>
              )}
            </div>
          </div>
          <button
            onClick={() => setConfirming(true)}
            disabled={applying}
            className="ml-4 flex-shrink-0 flex items-center gap-1.5 px-3 py-1.5 bg-warning-600 hover:bg-warning-700 disabled:opacity-50 text-white text-sm font-medium rounded-md transition-colors"
          >
            <RefreshCw className={`h-4 w-4 ${applying ? 'animate-spin' : ''}`} />
            {applying ? 'Restarting…' : 'Apply Now'}
          </button>
        </div>
      </div>
      {/* Confirmation modal, rendered only while `confirming` is true. */}
      {confirming && (
        <div className="fixed inset-0 z-50 flex items-center justify-center bg-black/40">
          <div className="bg-white rounded-xl shadow-2xl p-6 max-w-sm w-full mx-4">
            <div className="flex items-center gap-3 mb-3">
              <AlertTriangle className="h-6 w-6 text-warning-500 flex-shrink-0" />
              <h3 className="text-base font-semibold text-gray-900">Restart containers?</h3>
            </div>
            <p className="text-sm text-gray-600 mb-5">
              All containers will be restarted to apply the new configuration.
              The UI will be briefly unavailable during the restart.
            </p>
            <div className="flex gap-3 justify-end">
              <button
                onClick={() => setConfirming(false)}
                className="px-4 py-2 text-sm font-medium text-gray-700 bg-gray-100 hover:bg-gray-200 rounded-md transition-colors"
              >
                Cancel
              </button>
              <button
                onClick={handleApply}
                className="px-4 py-2 text-sm font-medium text-white bg-warning-600 hover:bg-warning-700 rounded-md transition-colors"
              >
                Restart now
              </button>
            </div>
          </div>
        </div>
      )}
    </>
  );
}
function App() { function App() {
const [isOnline, setIsOnline] = useState(false); const [isOnline, setIsOnline] = useState(false);
const [isLoading, setIsLoading] = useState(true); const [isLoading, setIsLoading] = useState(true);
const [pending, setPending] = useState({ needs_restart: false, changes: [] });
const checkHealth = useCallback(async () => {
try {
await healthAPI.check();
setIsOnline(true);
} catch {
setIsOnline(false);
} finally {
setIsLoading(false);
}
}, []);
const checkPending = useCallback(async () => {
try {
const res = await cellAPI.getPending();
setPending(res.data);
} catch {
// ignore not critical
}
}, []);
useEffect(() => { useEffect(() => {
const checkHealth = async () => {
try {
await healthAPI.check();
setIsOnline(true);
} catch (error) {
console.error('Backend not available:', error);
setIsOnline(false);
} finally {
setIsLoading(false);
}
};
checkHealth(); checkHealth();
const interval = setInterval(checkHealth, 5000); // Check every 30 seconds checkPending();
const healthInterval = setInterval(checkHealth, 5000);
const pendingInterval = setInterval(checkPending, 5000);
return () => {
clearInterval(healthInterval);
clearInterval(pendingInterval);
};
}, [checkHealth, checkPending]);
return () => clearInterval(interval); const handleApply = useCallback(async () => {
await cellAPI.applyPending();
// Optimistically clear the banner; containers are restarting
setPending({ needs_restart: false, changes: [] });
}, []); }, []);
const navigation = [ const navigation = [
@@ -113,6 +208,10 @@ function App() {
</div> </div>
)} )}
{isOnline && pending.needs_restart && (
<PendingRestartBanner pending={pending} onApply={handleApply} />
)}
<Routes> <Routes>
<Route path="/" element={<Dashboard isOnline={isOnline} />} /> <Route path="/" element={<Dashboard isOnline={isOnline} />} />
<Route path="/peers" element={<Peers />} /> <Route path="/peers" element={<Peers />} />
+2
View File
@@ -43,6 +43,8 @@ export const cellAPI = {
deleteBackup: (id) => api.delete(`/api/config/backups/${id}`), deleteBackup: (id) => api.delete(`/api/config/backups/${id}`),
exportConfig: (format = 'json') => api.get('/api/config/export', { params: { format } }), exportConfig: (format = 'json') => api.get('/api/config/export', { params: { format } }),
importConfig: (config, format = 'json') => api.post('/api/config/import', { config, format }), importConfig: (config, format = 'json') => api.post('/api/config/import', { config, format }),
getPending: () => api.get('/api/config/pending'),
applyPending: () => api.post('/api/config/apply'),
}; };
// Network Services API // Network Services API