Merge PIC v2 — phases 1-5 + CI/CD: wizard, HTTPS, DDNS, service store, connectivity
Unit Tests / test (push) Failing after 8m52s

This commit is contained in:
2026-05-09 12:11:15 -04:00
33 changed files with 8340 additions and 193 deletions
+65
View File
@@ -0,0 +1,65 @@
# Release pipeline: on every semver tag (vX.Y.Z) build the pic-api and
# pic-webui Docker images and push them to the Gitea registry at git.pic.ngo.
name: Release — Build and Push Images

# Only version tags trigger a release build.
on:
  push:
    tags:
      - "v*.*.*"

jobs:
  # Builds ./api and pushes git.pic.ngo/roof/pic-api (git tag + moving "latest").
  build-api:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      # Registry credentials come from repository secrets.
      - name: Docker login to Gitea registry
        uses: docker/login-action@v3
        with:
          registry: git.pic.ngo
          username: ${{ secrets.REGISTRY_USER }}
          password: ${{ secrets.REGISTRY_TOKEN }}
      # Derives image tags: the pushed git tag plus "latest".
      - name: Docker meta (api)
        id: meta-api
        uses: docker/metadata-action@v5
        with:
          images: git.pic.ngo/roof/pic-api
          tags: |
            type=raw,value=latest
            type=ref,event=tag
      - name: Build and push pic-api
        uses: docker/build-push-action@v5
        with:
          context: ./api
          push: true
          tags: ${{ steps.meta-api.outputs.tags }}

  # Same pipeline for the web UI image built from ./webui.
  build-webui:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Docker login to Gitea registry
        uses: docker/login-action@v3
        with:
          registry: git.pic.ngo
          username: ${{ secrets.REGISTRY_USER }}
          password: ${{ secrets.REGISTRY_TOKEN }}
      - name: Docker meta (webui)
        id: meta-webui
        uses: docker/metadata-action@v5
        with:
          images: git.pic.ngo/roof/pic-webui
          tags: |
            type=raw,value=latest
            type=ref,event=tag
      - name: Build and push pic-webui
        uses: docker/build-push-action@v5
        with:
          context: ./webui
          push: true
          tags: ${{ steps.meta-webui.outputs.tags }}
+25
View File
@@ -0,0 +1,25 @@
# CI: run the unit-test suite on every push and pull request, any branch.
name: Unit Tests

on:
  push:
    branches: ["**"]
  pull_request:
    branches: ["**"]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: pip install -r api/requirements.txt
      # e2e and integration suites need a running cell, so only the plain
      # unit tests are executed here.
      - name: Run unit tests
        run: python3 -m pytest tests/ --ignore=tests/e2e --ignore=tests/integration -q
+41 -15
View File
@@ -2,9 +2,9 @@
# Provides easy commands for managing the cell
.PHONY: help start stop restart status logs clean setup check-deps init-peers \
update reinstall uninstall \
update reinstall uninstall install \
build build-api build-webui \
start-dns start-api start-wg start-webui \
start-core start-dns start-api start-wg start-webui \
backup restore \
test test-all test-unit test-coverage test-api test-cli \
test-phase1 test-phase2 test-phase3 test-phase4 test-all-phases \
@@ -17,6 +17,9 @@
# Detect docker compose command (v2 plugin preferred, fallback to v1 standalone)
DC := $(shell docker compose version >/dev/null 2>&1 && echo "docker compose" || echo "docker-compose")
# Full compose command: includes docker-compose.services.yml when it exists
DCF = $(DC) $(if $(wildcard docker-compose.services.yml),-f docker-compose.yml -f docker-compose.services.yml,-f docker-compose.yml)
# Default target
help:
@echo "Personal Internet Cell - Management Commands"
@@ -93,12 +96,12 @@ init-peers:
start:
@echo "Starting Personal Internet Cell..."
PUID=$$(id -u) PGID=$$(id -g) $(DC) up -d --build
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full up -d --build
@echo "Services started. Check status with 'make status'"
stop:
@echo "Stopping Personal Internet Cell..."
PUID=$$(id -u) PGID=$$(id -g) $(DC) down
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full down
@echo "Services stopped."
restart:
@@ -109,16 +112,16 @@ restart:
status:
@echo "Personal Internet Cell Status:"
@echo "================================"
$(DC) ps
$(DCF) ps
@echo ""
@echo "API Status:"
@curl -s http://localhost:3000/health || echo "API not responding"
logs:
$(DC) logs -f
$(DCF) logs -f
logs-%:
$(DC) logs -f $*
$(DCF) logs -f $*
shell-%:
docker exec -it cell-$* /bin/bash 2>/dev/null || docker exec -it cell-$* /bin/sh
@@ -135,17 +138,31 @@ update:
$(MAKE) setup; \
fi
@echo "Rebuilding and restarting services..."
PUID=$$(id -u) PGID=$$(id -g) $(DC) up -d --build
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full up -d --build
@echo "Update complete. Run 'make status' to verify."
reinstall:
@echo "Reinstalling Personal Internet Cell from scratch..."
PUID=$$(id -u) PGID=$$(id -g) $(DC) down -v 2>/dev/null || true
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full down -v 2>/dev/null || true
@sudo rm -rf config/ data/
@$(MAKE) setup
@$(MAKE) start
@echo "Reinstall complete."
install:
@if [ -f /opt/pic/.installed ] && [ "$(FORCE)" != "1" ]; then \
echo "Already installed. Run 'make update' to update, or 'make install FORCE=1' to reinstall."; \
exit 0; \
fi
@echo "Running setup..."
@$(MAKE) setup
@echo "Installing systemd unit..."
@sudo cp scripts/pic.service /etc/systemd/system/pic.service
@-sudo systemctl daemon-reload && sudo systemctl enable pic
@sudo mkdir -p /opt/pic
@sudo touch /opt/pic/.installed
@echo "Installation complete. Run 'make start-core' to start core services."
uninstall:
@echo ""
@echo "This will stop and remove all containers."
@@ -155,20 +172,24 @@ uninstall:
case "$$ans" in \
y|Y) \
echo "Stopping containers and removing images..."; \
PUID=$$(id -u) PGID=$$(id -g) $(DC) down -v --rmi all 2>/dev/null || true; \
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full down -v --rmi all 2>/dev/null || true; \
echo "Deleting config/ and data/..."; \
sudo rm -rf config/ data/; \
echo "Uninstall complete. Git repo and scripts remain."; \
;; \
n|N|"") \
echo "Stopping and removing containers (keeping images and data)..."; \
PUID=$$(id -u) PGID=$$(id -g) $(DC) down 2>/dev/null || true; \
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile full down 2>/dev/null || true; \
echo "Done. Images, config/ and data/ are untouched. Run 'make start' to bring it back up."; \
;; \
*) \
echo "Cancelled."; \
;; \
esac
@-sudo systemctl disable pic 2>/dev/null || true
@-sudo rm -f /etc/systemd/system/pic.service
@-sudo rm -f /opt/pic/.installed
@echo "Note: Data volumes were not deleted. To remove all data, manually delete config/ and data/."
# ── Build ─────────────────────────────────────────────────────────────────────
@@ -188,17 +209,22 @@ build-webui:
# ── Individual services ───────────────────────────────────────────────────────
start-core:
@echo "Starting core services (caddy, dns, wireguard, api, webui)..."
PUID=$$(id -u) PGID=$$(id -g) $(DCF) --profile core up -d --build
@echo "Core services started. Run 'make start' to also bring up optional services."
start-dns:
$(DC) up -d dns
$(DC) --profile core up -d dns
start-api:
$(DC) up -d api
$(DC) --profile core up -d api
start-wg:
$(DC) up -d wireguard
$(DC) --profile core up -d wireguard
start-webui:
$(DC) up -d webui
$(DC) --profile core up -d webui
# ── Maintenance ───────────────────────────────────────────────────────────────
+165 -4
View File
@@ -40,7 +40,10 @@ from managers import (
network_manager, wireguard_manager, peer_registry,
email_manager, calendar_manager, file_manager,
routing_manager, vault_manager, container_manager,
cell_link_manager, auth_manager,
cell_link_manager, auth_manager, setup_manager,
caddy_manager,
ddns_manager, service_store_manager,
connectivity_manager,
firewall_manager, EventType,
)
# Re-exports: tests do `from app import CellManager` and `from app import _resolve_peer_dns`
@@ -158,6 +161,28 @@ def enrich_log_context():
'user': user
})
@app.before_request
def enforce_setup():
    """Gate /api/* behind the first-run wizard.

    Requests pass through when TESTING is enabled, when the path is a
    setup route or /health, or when it is not an API path at all. Any
    other /api/* request while setup is incomplete gets a 428 response
    with a redirect hint to /setup.
    """
    if app.config.get('TESTING'):
        return None
    path = request.path
    exempt = (not path.startswith('/api/')
              or path.startswith('/api/setup')
              or path == '/health')
    if exempt or setup_manager.is_setup_complete():
        return None
    return jsonify({'error': 'Setup required', 'redirect': '/setup'}), 428
@app.before_request
def enforce_auth():
"""Enforce session-based authentication and role-based access control.
@@ -232,7 +257,7 @@ def check_csrf():
if request.method not in ('POST', 'PUT', 'DELETE', 'PATCH'):
return None
path = request.path
if not path.startswith('/api/') or path.startswith('/api/auth/'):
if not path.startswith('/api/') or path.startswith('/api/auth/') or path.startswith('/api/setup/'):
return None
# peer-sync uses IP+pubkey auth — no session, no CSRF token possible
if path.startswith('/api/cells/peer-sync/'):
@@ -324,6 +349,9 @@ def _apply_startup_enforcement():
wireguard_manager.ensure_postup_dnat()
firewall_manager.ensure_dns_dnat()
firewall_manager.ensure_service_dnat()
# Allow Docker containers (cell-dns) to reach remote cell subnets via wg0.
firewall_manager.ensure_wg_masquerade()
firewall_manager.ensure_cell_subnet_routes(cell_links)
# Restore any cell link WireGuard peers that were lost from wg0.conf
# (happens if the container was rebuilt, wg0.conf was reset, etc.)
_restore_cell_wg_peers(cell_links)
@@ -347,6 +375,16 @@ def _apply_startup_enforcement():
sync_summary = cell_link_manager.replay_pending_pushes()
if sync_summary.get('attempted'):
logger.info(f"Startup permission sync: {sync_summary}")
# Service store: re-apply firewall/caddy rules for installed services
try:
service_store_manager.reapply_on_startup()
except Exception as _sse:
logger.warning(f"service_store reapply_on_startup failed (non-fatal): {_sse}")
# Phase 5: re-apply extended-connectivity policy routing rules
try:
connectivity_manager.apply_routes()
except Exception as _ce:
logger.warning(f"connectivity apply_routes failed (non-fatal): {_ce}")
except Exception as e:
logger.warning(f"Startup enforcement failed (non-fatal): {e}")
@@ -406,6 +444,10 @@ service_bus.register_service('container', container_manager)
# Register auth blueprint
app.register_blueprint(auth_routes.auth_bp)
# Register setup blueprint (no auth required — runs before any account exists)
from routes.setup import setup_bp
app.register_blueprint(setup_bp)
# Register service blueprints (routes extracted from this file)
from routes.email import bp as _email_bp
from routes.calendar import bp as _calendar_bp
@@ -434,6 +476,9 @@ app.register_blueprint(_services_bp)
app.register_blueprint(_peer_dashboard_bp)
app.register_blueprint(_config_bp)
from routes.service_store import store_bp
app.register_blueprint(store_bp)
# Re-export config helpers so existing test imports/patches keep working
from routes.config import (
_set_pending_restart, _clear_pending_restart,
@@ -523,15 +568,34 @@ def health_monitor_loop():
with app.app_context():
health_result = perform_health_check()
health_history.appendleft(health_result)
# Publish health check event
service_bus.publish_event(EventType.HEALTH_CHECK, 'api', health_result)
# Re-anchor stateful rule every cycle: wg0 PostUp uses -I FORWARD which
# pushes ESTABLISHED,RELATED down below per-peer DROPs on restart.
firewall_manager.ensure_forward_stateful()
# Caddy health monitor: 3 consecutive failures triggers a restart.
try:
if caddy_manager.check_caddy_health():
caddy_manager.reset_health_failures()
else:
count = caddy_manager.increment_health_failure()
if count >= 3:
logger.warning(
"Caddy health check failed %d times \u2014 restarting",
count,
)
container_manager.restart_container('cell-caddy')
caddy_manager.reset_health_failures()
except Exception as _caddy_err:
logger.error("Caddy health monitor error: %s", _caddy_err)
time.sleep(60) # Check every 60 seconds
# Start health monitor thread
health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True)
health_monitor_thread.start()
# Start DDNS heartbeat thread (updates public IP every 5 minutes when a provider is configured)
ddns_manager.start_heartbeat()
def _local_subnets():
"""Return all subnets the container is directly connected to (from routing table)."""
import ipaddress as _ipa, socket as _sock, struct as _struct
@@ -666,6 +730,103 @@ def clear_health_history():
service_alert_counters = {}
return jsonify({'message': 'Health history cleared'})
# ---------------------------------------------------------------------------
# Phase 5 — Extended connectivity routes
# ---------------------------------------------------------------------------
@app.route('/api/connectivity/status', methods=['GET'])
def connectivity_status():
    """Report the connectivity manager's state (configured exits, peer counts)."""
    try:
        status = connectivity_manager.get_status()
        return jsonify(status)
    except Exception as e:
        logger.error(f"connectivity_status: {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/exits', methods=['GET'])
def connectivity_list_exits():
    """Enumerate the configured exits together with their current state."""
    try:
        exits = connectivity_manager.list_exits()
        return jsonify({'exits': exits})
    except Exception as e:
        logger.error(f"connectivity_list_exits: {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/exits/wireguard', methods=['POST'])
def connectivity_upload_wireguard():
    """Accept an external WireGuard config; it becomes the wg_ext0 exit."""
    try:
        payload = request.get_json(silent=True) or {}
        conf_text = payload.get('conf_text', '')
        # Reject anything that is not a non-blank string.
        if not (isinstance(conf_text, str) and conf_text.strip()):
            return jsonify({'ok': False, 'error': 'conf_text is required'}), 400
        result = connectivity_manager.upload_wireguard_ext(conf_text)
        status = 200 if result.get('ok') else 400
        return jsonify(result), status
    except Exception as e:
        logger.error(f"connectivity_upload_wireguard: {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/exits/openvpn', methods=['POST'])
def connectivity_upload_openvpn():
    """Accept an OpenVPN profile (.ovpn) for the openvpn exit."""
    try:
        payload = request.get_json(silent=True) or {}
        ovpn_text = payload.get('ovpn_text', '')
        name = payload.get('name', 'default')
        # Reject anything that is not a non-blank string.
        if not (isinstance(ovpn_text, str) and ovpn_text.strip()):
            return jsonify({'ok': False, 'error': 'ovpn_text is required'}), 400
        result = connectivity_manager.upload_openvpn(ovpn_text, name=name)
        status = 200 if result.get('ok') else 400
        return jsonify(result), status
    except Exception as e:
        logger.error(f"connectivity_upload_openvpn: {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/exits/apply', methods=['POST'])
def connectivity_apply_routes():
    """Re-apply every connectivity policy-routing rule (idempotent)."""
    try:
        return jsonify(connectivity_manager.apply_routes())
    except Exception as e:
        logger.error(f"connectivity_apply_routes: {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/peers/<peer_name>/exit', methods=['PUT'])
def connectivity_set_peer_exit(peer_name: str):
    """Assign a peer to an egress exit type."""
    try:
        payload = request.get_json(silent=True) or {}
        exit_via = payload.get('exit_via')
        # The exit name must be present and a string; validation of the
        # value itself is the manager's job.
        if not isinstance(exit_via, str):
            return jsonify({'ok': False, 'error': 'exit_via is required'}), 400
        result = connectivity_manager.set_peer_exit(peer_name, exit_via)
        status = 200 if result.get('ok') else 400
        return jsonify(result), status
    except Exception as e:
        logger.error(f"connectivity_set_peer_exit({peer_name}): {e}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/connectivity/peers', methods=['GET'])
def connectivity_get_peer_exits():
    """Map every peer name to its configured exit type."""
    try:
        mapping = connectivity_manager.get_peer_exits()
        return jsonify({'peers': mapping})
    except Exception as e:
        logger.error(f"connectivity_get_peer_exits: {e}")
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Development entrypoint: bind on all interfaces, port 3000.
    # FLASK_DEBUG=1 enables the Werkzeug debugger/reloader.
    debug = os.environ.get('FLASK_DEBUG', '0') == '1'
    app.run(host='0.0.0.0', port=3000, debug=debug)
+397
View File
@@ -0,0 +1,397 @@
#!/usr/bin/env python3
"""
Caddy Manager for Personal Internet Cell.
Generates a Caddyfile based on the current identity (domain mode, cell name,
domain) and the list of installed services that contribute reverse-proxy
routes. Uses Caddy's admin API on http://127.0.0.1:2019 to hot-reload the
config without restarting the container.
Domain modes supported:
lan — local-only, internal CA, HTTP + self-signed HTTPS via
/etc/caddy/internal/{cert,key}.pem
pic_ngo — DNS-01 ACME via the pic_ngo Caddy plugin (wildcard cert)
cloudflare — DNS-01 ACME via the cloudflare Caddy plugin (wildcard cert)
duckdns — DNS-01 ACME via the duckdns Caddy plugin
http01 — HTTP-01 ACME (no wildcard); each subdomain gets its own
server block (used by No-IP, FreeDNS, etc.)
For all ACME modes ``acme_ca`` is read from the ``ACME_CA_URL`` env var so
tests / staging can point at Pebble or LE-staging without a code change.
Routes for installed services are inserted before the catch-all ``handle``
in the main server block (or, for ``http01``, written as their own per-host
blocks).
"""
import logging
import os
from typing import Any, Dict, List, Optional
import requests
from base_service_manager import BaseServiceManager
logger = logging.getLogger(__name__)
# Live Caddyfile path inside the cell-api container (host path is
# ./config/caddy/Caddyfile, mounted at /app/config-caddy). May be overridden
# in tests via the CADDYFILE_PATH env var.
LIVE_CADDYFILE = os.environ.get('CADDYFILE_PATH', '/app/config-caddy/Caddyfile')
# Caddy admin API base. The default points at the cell-caddy container
# hostname so cell-api can reach the admin API across the Docker bridge
# (Caddy must bind 2019 on 0.0.0.0 for that — see _global_acme_block).
# Override via the CADDY_ADMIN_URL env var, e.g. http://127.0.0.1:2019
# when Caddy and the API share a network namespace in dev/tests.
CADDY_ADMIN_URL = os.environ.get('CADDY_ADMIN_URL', 'http://cell-caddy:2019')
class CaddyManager(BaseServiceManager):
    """Manages Caddy reverse-proxy configuration and runtime health.

    Responsibilities:
      * render a Caddyfile for the current identity/domain mode plus the
        installed services' ``caddy_route`` snippets;
      * write it to ``self.caddyfile_path`` and hot-reload Caddy through
        the admin API at ``CADDY_ADMIN_URL``;
      * track consecutive health-check failures so the caller can decide
        when to restart the container.
    """

    def __init__(self, config_manager=None,
                 data_dir: str = '/app/data',
                 config_dir: str = '/app/config'):
        super().__init__('caddy', data_dir, config_dir)
        self.config_manager = config_manager
        self.container_name = 'cell-caddy'
        self.caddyfile_path = LIVE_CADDYFILE
        # Consecutive health-check failure counter (reset on success or when
        # the caller restarts the container).
        self._health_failures = 0

    # ── BaseServiceManager required ───────────────────────────────────────

    def get_status(self) -> Dict[str, Any]:
        """Return basic Caddy status (running + admin-API reachable)."""
        healthy = self.check_caddy_health()
        return {
            'service': self.service_name,
            'running': healthy,
            'admin_url': CADDY_ADMIN_URL,
            'caddyfile_path': self.caddyfile_path,
            'consecutive_failures': self._health_failures,
        }

    def test_connectivity(self) -> Dict[str, Any]:
        """Ping the Caddy admin API."""
        ok = self.check_caddy_health()
        return {
            'success': ok,
            'admin_url': CADDY_ADMIN_URL,
        }

    # ── Caddyfile generation ──────────────────────────────────────────────

    def generate_caddyfile(self, identity: Dict[str, Any],
                           installed_services: List[Dict[str, Any]]) -> str:
        """Generate a complete Caddyfile based on identity and services.

        Args:
            identity: identity dict from ``ConfigManager.get_identity()``.
                Expected keys: ``cell_name``, ``domain_mode``, optional
                ``custom_domain``, ``acme_email``.
            installed_services: list of service dicts; each may have a
                ``caddy_route`` string with one or more Caddyfile
                directives (e.g.
                ``"handle /calendar* {\\n  reverse_proxy ..."``).

        Returns:
            Caddyfile text.
        """
        identity = identity or {}
        cell_name = identity.get('cell_name', 'cell')
        domain_mode = identity.get('domain_mode', 'lan')

        # Aggregate the per-service route snippets that go inside the main
        # server block (everything except http01 mode). Each route is
        # indented to four spaces to keep the Caddyfile readable.
        service_routes = self._collect_service_routes(installed_services)

        # Core routes always present in the main server block. Inserted
        # *after* installed-service routes so a more specific /api/* on a
        # service can never shadow the API itself (no service should use
        # /api anyway, but this protects us from misconfigured plugins).
        core_routes = (
            "    handle /api/* {\n"
            "        reverse_proxy cell-api:3000\n"
            "    }\n"
            "    handle {\n"
            "        reverse_proxy cell-webui:80\n"
            "    }"
        )

        if domain_mode == 'lan':
            return self._caddyfile_lan(cell_name, service_routes, core_routes)
        if domain_mode == 'pic_ngo':
            return self._caddyfile_pic_ngo(cell_name, service_routes, core_routes)
        if domain_mode == 'cloudflare':
            custom_domain = identity.get('custom_domain', f'{cell_name}.local')
            return self._caddyfile_cloudflare(
                custom_domain, service_routes, core_routes
            )
        if domain_mode == 'duckdns':
            return self._caddyfile_duckdns(cell_name, service_routes, core_routes)
        if domain_mode == 'http01':
            host = identity.get('custom_domain', f'{cell_name}.noip.me')
            return self._caddyfile_http01(host, installed_services, core_routes)

        # Fallback to lan so we always emit a valid Caddyfile.
        logger.warning("Unknown domain_mode %r; falling back to 'lan'", domain_mode)
        return self._caddyfile_lan(cell_name, service_routes, core_routes)

    # ── per-mode generators ───────────────────────────────────────────────

    @staticmethod
    def _global_acme_block(email: Optional[str]) -> str:
        """Return the ``{ ... }`` global block for an ACME-enabled mode."""
        lines = ["{"]
        # Bind admin API on all interfaces so cell-api can reach cell-caddy
        # across the Docker bridge (the default 127.0.0.1 bind is
        # unreachable cross-container).
        lines.append("    admin 0.0.0.0:2019")
        if email:
            lines.append(f"    email {email}")
        # Always allow tests to override the ACME directory via env var.
        lines.append("    acme_ca {$ACME_CA_URL}")
        lines.append("}")
        return "\n".join(lines)

    @staticmethod
    def _indent_routes(routes: str, spaces: int = 4) -> str:
        """Indent a multi-line route block by ``spaces`` columns.

        Blank lines are left untouched so the output stays tidy.
        """
        if not routes:
            return ""
        prefix = " " * spaces
        return "\n".join(prefix + line if line.strip() else line
                         for line in routes.splitlines())

    def _collect_service_routes(self,
                                installed_services: List[Dict[str, Any]]) -> str:
        """Concatenate ``caddy_route`` strings from installed services."""
        chunks: List[str] = []
        for svc in installed_services or []:
            route = (svc or {}).get('caddy_route')
            if route:
                chunks.append(route.strip("\n"))
        return "\n".join(chunks)

    def _caddyfile_lan(self, cell_name: str,
                       service_routes: str, core_routes: str) -> str:
        """LAN mode: HTTP only + internal-CA TLS, no ACME."""
        body = []
        if service_routes:
            body.append(self._indent_routes(service_routes))
        body.append(core_routes)
        inner = "\n".join(body)
        # NOTE(review): the site addresses below are http:// yet a `tls`
        # directive is emitted inside the block; Caddy does not apply TLS
        # config to HTTP-only sites. Confirm whether an https:// (or bare)
        # host was intended before relying on the self-signed cert here.
        return (
            "{\n"
            "    admin 0.0.0.0:2019\n"
            "    auto_https off\n"
            "}\n"
            "\n"
            f"http://{cell_name}.cell, http://172.20.0.2:80 {{\n"
            "    tls /etc/caddy/internal/cert.pem /etc/caddy/internal/key.pem\n"
            f"{inner}\n"
            "}\n"
        )

    def _caddyfile_pic_ngo(self, cell_name: str,
                           service_routes: str, core_routes: str) -> str:
        """pic_ngo mode: wildcard DNS-01 via the pic_ngo plugin."""
        body = []
        if service_routes:
            body.append(self._indent_routes(service_routes))
        body.append(core_routes)
        inner = "\n".join(body)
        email = f"admin@{cell_name}.pic.ngo"
        return (
            f"{self._global_acme_block(email)}\n"
            "\n"
            f"*.{cell_name}.pic.ngo, {cell_name}.pic.ngo {{\n"
            "    tls {\n"
            "        dns pic_ngo {\n"
            "            token {$PIC_NGO_DDNS_TOKEN}\n"
            "            api_base_url {$PIC_NGO_DDNS_API}\n"
            "        }\n"
            "    }\n"
            f"{inner}\n"
            "}\n"
        )

    def _caddyfile_cloudflare(self, custom_domain: str,
                              service_routes: str, core_routes: str) -> str:
        """cloudflare mode: wildcard DNS-01 via the cloudflare plugin."""
        body = []
        if service_routes:
            body.append(self._indent_routes(service_routes))
        body.append(core_routes)
        inner = "\n".join(body)
        return (
            f"{self._global_acme_block('{$ACME_EMAIL}')}\n"
            "\n"
            f"*.{custom_domain}, {custom_domain} {{\n"
            "    tls {\n"
            "        dns cloudflare {$CF_API_TOKEN}\n"
            "    }\n"
            f"{inner}\n"
            "}\n"
        )

    def _caddyfile_duckdns(self, cell_name: str,
                           service_routes: str, core_routes: str) -> str:
        """duckdns mode: DNS-01 via the duckdns plugin."""
        body = []
        if service_routes:
            body.append(self._indent_routes(service_routes))
        body.append(core_routes)
        inner = "\n".join(body)
        return (
            f"{self._global_acme_block(None)}\n"
            "\n"
            f"*.{cell_name}.duckdns.org {{\n"
            "    tls {\n"
            "        dns duckdns {$DUCKDNS_TOKEN}\n"
            "    }\n"
            f"{inner}\n"
            "}\n"
        )

    def _caddyfile_http01(self, host: str,
                          installed_services: List[Dict[str, Any]],
                          core_routes: str) -> str:
        """http01 mode: no wildcard. Each service gets its own block."""
        # Main host block — only the core routes (api + webui). Service
        # routes that could otherwise be served as path-prefixes are NOT
        # placed here because in http01 mode each service is intended to
        # live on its own subdomain (otherwise it could also use a path
        # prefix here, but the spec calls for separate blocks).
        out = [self._global_acme_block('{$ACME_EMAIL}'), ""]
        out.append(f"{host} {{")
        out.append(core_routes)
        out.append("}")
        # One block per installed service that has a caddy_route.
        for svc in installed_services or []:
            if not svc:
                continue
            route = svc.get('caddy_route')
            name = svc.get('name') or svc.get('subdomain')
            if not route or not name:
                continue
            out.append("")
            out.append(f"{name}.{host} {{")
            out.append(self._indent_routes(route))
            out.append("}")
        return "\n".join(out) + "\n"

    # ── filesystem + admin-API operations ─────────────────────────────────

    def write_caddyfile(self, caddyfile_content: str) -> bool:
        """Write the Caddyfile and reload Caddy via the admin API.

        Writes in-place (same inode) so Docker bind-mounts continue to see
        the file. Returns True if both write and reload succeed.
        """
        try:
            os.makedirs(os.path.dirname(os.path.abspath(self.caddyfile_path)),
                        exist_ok=True)
        except (PermissionError, OSError) as e:
            # Non-fatal: the directory may already exist read-only; the
            # open() below is the authoritative failure point.
            logger.warning("Could not create Caddyfile dir: %s", e)
        try:
            with open(self.caddyfile_path, 'w') as f:
                f.write(caddyfile_content)
                f.flush()
                try:
                    # Best-effort durability; some mounts reject fsync.
                    os.fsync(f.fileno())
                except OSError:
                    pass
            logger.info("Wrote Caddyfile to %s (%d bytes)",
                        self.caddyfile_path, len(caddyfile_content))
        except Exception as e:
            logger.error("Failed to write Caddyfile: %s", e)
            return False
        return self.reload_caddy()

    def reload_caddy(self) -> bool:
        """POST the current Caddyfile to the Caddy admin API for a hot reload.

        Returns True on HTTP 200, False otherwise.
        """
        try:
            with open(self.caddyfile_path, 'r') as f:
                caddyfile = f.read()
        except Exception as e:
            logger.error("Cannot read Caddyfile for reload: %s", e)
            return False
        url = f"{CADDY_ADMIN_URL}/load"
        try:
            resp = requests.post(
                url,
                data=caddyfile,
                headers={'Content-Type': 'text/caddyfile'},
                timeout=10,
            )
        except requests.RequestException as e:
            logger.error("Caddy admin reload failed: %s", e)
            return False
        if resp.status_code == 200:
            logger.info("Caddy reload succeeded (status=200)")
            return True
        logger.error(
            "Caddy reload failed: status=%s body=%s",
            resp.status_code, resp.text[:500],
        )
        return False

    def check_caddy_health(self) -> bool:
        """GET the Caddy admin API root. Returns True on HTTP 200."""
        try:
            resp = requests.get(CADDY_ADMIN_URL + "/", timeout=5)
        except requests.RequestException as e:
            logger.debug("Caddy health check error: %s", e)
            return False
        return resp.status_code == 200

    # ── consecutive-failure bookkeeping ───────────────────────────────────

    def get_health_failure_count(self) -> int:
        """Return the current consecutive failure count."""
        return self._health_failures

    def increment_health_failure(self) -> int:
        """Increment and return the consecutive failure count."""
        self._health_failures += 1
        return self._health_failures

    def reset_health_failures(self) -> None:
        """Reset the consecutive failure counter to zero."""
        self._health_failures = 0

    # ── regeneration + certificate status ─────────────────────────────────

    def regenerate_with_installed(self, installed_services: list) -> bool:
        """Regenerate the Caddyfile with installed services and reload.

        Returns False (instead of raising AttributeError) when no
        config_manager is wired up — consistent with get_cert_status.
        """
        if not self.config_manager:
            logger.error("regenerate_with_installed: no config_manager configured")
            return False
        identity = self.config_manager.get_identity()
        content = self.generate_caddyfile(identity, installed_services)
        return self.write_caddyfile(content)

    def get_cert_status(self) -> Dict[str, Any]:
        """Return TLS cert status from identity['tls'] if present."""
        default = {'status': 'unknown', 'expiry': None, 'days_remaining': None}
        if not self.config_manager:
            return default
        try:
            ident = self.config_manager.get_identity() or {}
        except Exception as e:
            logger.error("get_cert_status: failed to read identity: %s", e)
            return default
        tls = ident.get('tls') or {}
        return {
            'status': tls.get('status', 'unknown'),
            'expiry': tls.get('expiry'),
            'days_remaining': tls.get('days_remaining'),
        }
+62 -1
View File
@@ -37,6 +37,12 @@ class ConfigManager:
pass
self.service_schemas = self._load_service_schemas()
self.configs = self._load_all_configs()
# Ensure _identity key always exists
if '_identity' not in self.configs:
self.configs['_identity'] = {}
# Phase 5: ensure connectivity section exists with empty defaults.
if 'connectivity' not in self.configs:
self.configs['connectivity'] = {'exits': {}, 'peer_exit_map': {}}
if not self.config_file.exists():
self._save_all_configs()
@@ -105,6 +111,14 @@ class ConfigManager:
'ca_configured': bool,
'fernet_configured': bool
}
},
'connectivity': {
'required': [],
'optional': ['exits', 'peer_exit_map'],
'types': {
'exits': dict,
'peer_exit_map': dict,
}
}
}
@@ -460,10 +474,57 @@ class ConfigManager:
# No-op for unified config, but keep for compatibility
pass
def get_identity(self) -> Dict[str, Any]:
    """Return the stored identity configuration ({} when none is saved)."""
    try:
        return self.configs['_identity']
    except KeyError:
        return {}
def set_identity_field(self, key: str, value: Any):
    """Store one field in the identity section and persist to disk."""
    identity = self.configs.setdefault('_identity', {})
    identity[key] = value
    self._save_all_configs()
def get_installed_services(self) -> dict:
    """Return the installed-services map from identity ({} when absent)."""
    identity = self.configs.get('_identity', {})
    return identity.get('installed_services', {})
def set_installed_service(self, service_id: str, record: dict):
    """Insert or replace one installed-service record and persist."""
    identity = self.configs.setdefault('_identity', {})
    services = identity.setdefault('installed_services', {})
    services[service_id] = record
    self._save_all_configs()
def remove_installed_service(self, service_id: str):
    """Drop a service's record and its reserved IP, then persist."""
    identity = self.configs.setdefault('_identity', {})
    for bucket in ('installed_services', 'service_ips'):
        identity.setdefault(bucket, {}).pop(service_id, None)
    self._save_all_configs()
# Phase 5 — Extended connectivity configuration helpers
def get_connectivity_config(self) -> Dict[str, Any]:
    """Return the connectivity section, healing missing pieces in place.

    Guarantees both 'exits' and 'peer_exit_map' keys exist; returns a
    shallow copy so callers can't replace top-level keys by accident.
    """
    cfg = self.configs.get('connectivity')
    if not isinstance(cfg, dict):
        cfg = {'exits': {}, 'peer_exit_map': {}}
        self.configs['connectivity'] = cfg
    else:
        for section in ('exits', 'peer_exit_map'):
            cfg.setdefault(section, {})
    return dict(cfg)
def set_connectivity_field(self, field: str, value: Any) -> bool:
    """Write one connectivity field; return False when persisting fails."""
    empty = {'exits': {}, 'peer_exit_map': {}}
    cfg = self.configs.setdefault('connectivity', empty)
    cfg[field] = value
    try:
        self._save_all_configs()
    except Exception as e:
        logger.error(f"set_connectivity_field({field}): {e}")
        return False
    return True
def get_all_configs(self) -> Dict[str, Dict]:
"""Get all service configurations"""
return self.configs.copy()
def get_config_summary(self) -> Dict[str, Any]:
"""Get summary of all configurations"""
summary = {
+543
View File
@@ -0,0 +1,543 @@
#!/usr/bin/env python3
"""
Connectivity Manager for Personal Internet Cell Phase 5 Extended Connectivity.
Provides per-peer egress routing through alternate exits (WireGuard external,
OpenVPN, Tor) via Linux policy routing (fwmark + ip rule + dedicated routing
tables) and dedicated iptables chains.
Architecture
------------
- A peer's `exit_via` field selects the egress path: "default", "wireguard_ext",
"openvpn", or "tor".
- Each non-default exit type is assigned a unique fwmark and a dedicated routing
table:
wireguard_ext mark 0x10 table 110 iface wg_ext0
openvpn mark 0x20 table 120 iface tun0
tor mark 0x30 table 130 (transparent proxy 9040)
- All rules live in dedicated PIC_CONNECTIVITY chains in the `mangle` and `nat`
tables so they can be flushed/rebuilt without touching firewall_manager rules.
- A kill-switch FORWARD DROP rule prevents leaks if the exit interface is down.
Container model
---------------
Each exit type runs in its own separate container; this manager only programs
policy routing rules in the WireGuard server container (cell-wireguard) where
peer traffic flows through.
Config files
------------
- WireGuard external: {config_dir}/connectivity/wireguard_ext/wg_ext0.conf
- OpenVPN: {config_dir}/connectivity/openvpn/<name>.ovpn
Both are validated to strip / reject hook directives that could execute
arbitrary commands on the host.
"""
import logging
import os
import re
import subprocess
from typing import Any, Dict, List, Optional
from base_service_manager import BaseServiceManager
logger = logging.getLogger(__name__)
WIREGUARD_CONTAINER = 'cell-wireguard'
# Lines we strip from uploaded WireGuard configs — these can run arbitrary
# host commands when wg-quick brings the interface up/down.
_WG_FORBIDDEN_PREFIXES = ('PostUp', 'PostDown', 'PreUp', 'PreDown')
# Lines we strip from uploaded OpenVPN configs — these execute external
# scripts/binaries on connect/disconnect.
_OVPN_FORBIDDEN_DIRECTIVES = (
'up', 'down', 'script-security', 'plugin',
'route-up', 'route-pre-down',
)
_NAME_RE = re.compile(r'^[a-z0-9_-]{1,32}$')
class ConnectivityManager(BaseServiceManager):
    """Manages alternate egress paths (extended connectivity) for peers."""

    # All recognised values for a peer's `exit_via` field.
    EXIT_TYPES = ("default", "wireguard_ext", "openvpn", "tor")
    # fwmark applied to packets from peers assigned to each non-default exit.
    MARKS = {"wireguard_ext": 0x10, "openvpn": 0x20, "tor": 0x30}
    # Dedicated policy-routing table per exit (`ip rule fwmark … lookup N`).
    TABLES = {"wireguard_ext": 110, "openvpn": 120, "tor": 130}
    # Egress interface per exit type; Tor has none (transparent proxy instead).
    IFACES = {"wireguard_ext": "wg_ext0", "openvpn": "tun0"}
    # Tor transparent-proxy TCP port (TransPort).
    TOR_TRANS_PORT = 9040
    # Tor DNS port — not referenced in this class; presumably consumed by the
    # Tor container config. TODO confirm.
    TOR_DNS_PORT = 5353
    # Name of the dedicated iptables chain used in both `mangle` and `nat`.
    CONNECTIVITY_CHAIN = 'PIC_CONNECTIVITY'
def __init__(self, config_manager=None, peer_registry=None,
             data_dir: str = '/app/data', config_dir: str = '/app/config'):
    """Create the manager and ensure its config directories exist.

    Args:
        config_manager: Optional config store; used by get_config().
        peer_registry: Optional peer store; used for per-peer exit lookups.
        data_dir: Base data directory passed to BaseServiceManager.
        config_dir: Base config directory; connectivity configs live below it.
    """
    super().__init__('connectivity', data_dir, config_dir)
    self.config_manager = config_manager
    self.peer_registry = peer_registry
    # Directories for uploaded exit configs (created if missing).
    self.connectivity_config_dir = os.path.join(config_dir, 'connectivity')
    self.wireguard_ext_dir = os.path.join(self.connectivity_config_dir, 'wireguard_ext')
    self.openvpn_dir = os.path.join(self.connectivity_config_dir, 'openvpn')
    for d in (self.connectivity_config_dir, self.wireguard_ext_dir, self.openvpn_dir):
        self.safe_makedirs(d)
    # Subscribe to ServiceBus CONFIG_CHANGED events so routes are
    # reapplied if the underlying network changes. Done lazily —
    # service_bus is a singleton imported at app startup.
    self._subscribe_to_events()
# ── Event wiring ──────────────────────────────────────────────────────
def _subscribe_to_events(self) -> None:
    """Subscribe to network change events so routes auto-reapply.

    Best-effort: if the managers module / service_bus singleton is not
    importable yet, the subscription is skipped and a manual
    apply_routes() remains the fallback.
    """
    try:
        from managers import service_bus, EventType
        service_bus.subscribe_to_event(
            EventType.CONFIG_CHANGED, self._on_network_changed
        )
    except Exception as e:
        # Non-fatal: subscription is best-effort, manual apply still works.
        logger.debug(f"connectivity: event subscribe skipped: {e}")
def _on_network_changed(self, event) -> None:
"""ServiceBus handler: re-apply routes when network config changes."""
try:
source = getattr(event, 'source', '')
if source not in ('network', 'wireguard', 'connectivity'):
return
logger.info(f"connectivity: re-applying routes due to {source} change")
self.apply_routes()
except Exception as e:
logger.warning(f"connectivity: on_network_changed failed (non-fatal): {e}")
# ── BaseServiceManager required ───────────────────────────────────────
def get_status(self) -> Dict[str, Any]:
    """Return status summary including configured exits and peer count."""
    try:
        # One status entry per non-default exit type.
        exits_status: Dict[str, Dict[str, Any]] = {
            exit_type: self._exit_status(exit_type)
            for exit_type in self.EXIT_TYPES
            if exit_type != "default"
        }
        peers_with_exit = 0
        if self.peer_registry is not None:
            try:
                for record in self.peer_registry.list_peers():
                    if record.get('exit_via', 'default') != 'default':
                        peers_with_exit += 1
            except Exception as e:
                logger.warning(f"get_status: peer count failed: {e}")
        return {
            'service': 'connectivity',
            'running': True,
            'exits': exits_status,
            'peers_with_exit': peers_with_exit,
        }
    except Exception as e:
        return self.handle_error(e, 'get_status')
def test_connectivity(self) -> Dict[str, Any]:
"""Minimal connectivity self-test."""
return {'success': True}
def get_config(self) -> Dict[str, Any]:
    """Return current connectivity config from config_manager.

    Falls back to an empty {'exits', 'peer_exit_map'} skeleton when no
    config_manager is attached, it lacks the accessor, or the lookup fails.
    """
    cm = self.config_manager
    try:
        if cm is not None and hasattr(cm, 'get_connectivity_config'):
            return cm.get_connectivity_config()
    except Exception as e:
        logger.warning(f"get_config: config_manager lookup failed: {e}")
    return {'exits': {}, 'peer_exit_map': {}}
# ── Public API ────────────────────────────────────────────────────────
def list_exits(self) -> List[Dict[str, Any]]:
    """List configured exits with current status (one entry per non-default exit)."""
    return [
        {'type': exit_type, **self._exit_status(exit_type)}
        for exit_type in self.EXIT_TYPES
        if exit_type != "default"
    ]
def get_peer_exits(self) -> Dict[str, str]:
    """Return {peer_name: exit_type} for all peers.

    Peers without a name are skipped; missing 'exit_via' defaults to
    'default'. Returns {} when no registry is attached or listing fails.
    """
    mapping: Dict[str, str] = {}
    registry = self.peer_registry
    if registry is None:
        return mapping
    try:
        for record in registry.list_peers():
            peer_name = record.get('peer')
            if peer_name:
                mapping[peer_name] = record.get('exit_via', 'default')
    except Exception as e:
        logger.warning(f"get_peer_exits: {e}")
    return mapping
def set_peer_exit(self, peer_name: str, exit_type: str) -> Dict[str, Any]:
    """Assign a peer to an egress path and apply the rule changes.

    Validates the exit type and peer name, updates the registry, then
    re-applies routes best-effort (a route failure does not undo the
    registry update).
    """
    # Requested exit must be one of the known types.
    if exit_type not in self.EXIT_TYPES:
        return {
            'ok': False,
            'error': f"invalid exit_type {exit_type!r}; "
                     f"must be one of {self.EXIT_TYPES}",
        }
    name_ok = isinstance(peer_name, str) and re.match(r'^[A-Za-z0-9_.-]{1,64}$', peer_name)
    if not name_ok:
        return {'ok': False, 'error': f'invalid peer_name {peer_name!r}'}
    if self.peer_registry is None:
        return {'ok': False, 'error': 'peer_registry not available'}
    try:
        updated = self.peer_registry.set_peer_exit_via(peer_name, exit_type)
    except Exception as e:
        logger.error(f"set_peer_exit: registry update failed: {e}")
        return {'ok': False, 'error': str(e)}
    if not updated:
        return {'ok': False, 'error': f'peer {peer_name!r} not found'}
    # Route application is best-effort; the registry is already updated.
    try:
        self.apply_routes()
    except Exception as e:
        logger.warning(f"set_peer_exit: apply_routes failed (non-fatal): {e}")
    return {'ok': True, 'peer': peer_name, 'exit_via': exit_type}
def upload_wireguard_ext(self, conf_text: str) -> Dict[str, Any]:
    """Validate and store an external WireGuard config as wg_ext0.conf.

    Returns {'ok': True} on success, or {'ok': False, 'error': …} when
    validation or the secure write fails.
    """
    try:
        sanitized = self._validate_wg_conf(conf_text)
    except ValueError as e:
        return {'ok': False, 'error': str(e)}
    target = os.path.join(self.wireguard_ext_dir, 'wg_ext0.conf')
    try:
        self._write_secure(target, sanitized)
    except Exception as e:
        logger.error(f"upload_wireguard_ext: write failed: {e}")
        return {'ok': False, 'error': str(e)}
    logger.info(f"connectivity: stored wg_ext0.conf ({len(sanitized)} bytes)")
    return {'ok': True}
def upload_openvpn(self, ovpn_text: str, name: str = 'default') -> Dict[str, Any]:
    """Validate and store an OpenVPN profile as <name>.ovpn.

    Returns {'ok': True} on success, or {'ok': False, 'error': …} when
    the name is invalid, validation fails, or the secure write fails.
    """
    # Profile name must be short, lowercase and filesystem-safe.
    if not isinstance(name, str) or not re.match(r'^[a-z0-9_-]{1,32}$', name):
        return {
            'ok': False,
            'error': f'invalid name {name!r}; must match [a-z0-9_-]{{1,32}}',
        }
    try:
        sanitized = self._validate_ovpn(ovpn_text)
    except ValueError as e:
        return {'ok': False, 'error': str(e)}
    target = os.path.join(self.openvpn_dir, f'{name}.ovpn')
    try:
        self._write_secure(target, sanitized)
    except Exception as e:
        logger.error(f"upload_openvpn: write failed: {e}")
        return {'ok': False, 'error': str(e)}
    logger.info(f"connectivity: stored {name}.ovpn ({len(sanitized)} bytes)")
    return {'ok': True}
# ── Routing application ───────────────────────────────────────────────
def apply_routes(self) -> Dict[str, Any]:
    """Idempotently rebuild all connectivity rules and policy routing.

    Order matters: chains are ensured, then flushed, ip rules are
    re-anchored per exit, per-peer mark/redirect rules are appended, and
    finally the kill-switch rules are (re)checked. Every step is
    best-effort and logged on failure so one broken exit does not block
    the others.

    Returns:
        {'ok': True, 'rules_applied': <count of rules added>}.
    """
    rules_applied = 0
    try:
        self._ensure_chains()
    except Exception as e:
        logger.warning(f"apply_routes: _ensure_chains failed: {e}")
    # Flush our dedicated chains (without deleting them)
    for table, chain in (('mangle', self.CONNECTIVITY_CHAIN),
                         ('nat', self.CONNECTIVITY_CHAIN)):
        try:
            self._flush_chain(table, chain)
        except Exception as e:
            logger.warning(f"apply_routes: flush {table}/{chain} failed: {e}")
    # Idempotent ip rule registration for each non-default exit:
    # remove-then-add keeps exactly one rule per exit type.
    for exit_type in ('wireguard_ext', 'openvpn', 'tor'):
        mark = self.MARKS[exit_type]
        table = self.TABLES[exit_type]
        try:
            self._remove_ip_rule(mark, table)
            self._add_ip_rule(mark, table)
            rules_applied += 1
        except Exception as e:
            logger.warning(f"apply_routes: ip rule {exit_type} failed: {e}")
    # Per-peer marking + nat redirect (Tor only)
    if self.peer_registry is not None:
        try:
            peers = self.peer_registry.list_peers()
        except Exception as e:
            logger.warning(f"apply_routes: list_peers failed: {e}")
            peers = []
        for peer in peers:
            exit_via = peer.get('exit_via', 'default')
            if exit_via == 'default' or exit_via not in self.MARKS:
                continue
            # Peers without a resolvable source IP are silently skipped.
            src_ip = self._peer_source_ip(peer.get('peer', ''))
            if not src_ip:
                continue
            mark = self.MARKS[exit_via]
            try:
                self._add_mark_rule(src_ip, mark)
                rules_applied += 1
            except Exception as e:
                logger.warning(
                    f"apply_routes: mark rule for {src_ip}/{exit_via}: {e}"
                )
            # Tor: redirect TCP to local transparent proxy
            if exit_via == 'tor':
                try:
                    self._add_tor_redirect(src_ip)
                    rules_applied += 1
                except Exception as e:
                    logger.warning(
                        f"apply_routes: tor redirect for {src_ip}: {e}"
                    )
    # Kill-switch: drop marked packets that would otherwise leak via the
    # default route if the exit interface is down.
    for exit_type, iface in self.IFACES.items():
        mark = self.MARKS[exit_type]
        try:
            self._add_killswitch(mark, iface)
            rules_applied += 1
        except Exception as e:
            logger.warning(f"apply_routes: killswitch {exit_type}: {e}")
    return {'ok': True, 'rules_applied': rules_applied}
# ── iptables / ip rule helpers ────────────────────────────────────────
def _wg_iptables(self, args: List[str], timeout: int = 10) -> subprocess.CompletedProcess:
    """Run iptables inside the WireGuard container (where peer traffic forwards)."""
    return subprocess.run(
        ['docker', 'exec', WIREGUARD_CONTAINER, 'iptables', *args],
        capture_output=True, text=True, timeout=timeout,
    )

def _wg_ip(self, args: List[str], timeout: int = 10) -> subprocess.CompletedProcess:
    """Run `ip` inside the WireGuard container."""
    return subprocess.run(
        ['docker', 'exec', WIREGUARD_CONTAINER, 'ip', *args],
        capture_output=True, text=True, timeout=timeout,
    )
def _ensure_chains(self) -> None:
    """Create PIC_CONNECTIVITY chains in mangle and nat (idempotent).

    For each table the chain is created if missing, then a jump from the
    table's PREROUTING chain to it is inserted at position 1 unless
    already present (tested with `iptables -C`).
    """
    for table, parent_chain in (
        ('mangle', 'PREROUTING'),
        ('nat', 'PREROUTING'),
    ):
        # Create chain if it doesn't already exist
        check = self._wg_iptables(
            ['-t', table, '-L', self.CONNECTIVITY_CHAIN, '-n']
        )
        if check.returncode != 0:
            create = self._wg_iptables(
                ['-t', table, '-N', self.CONNECTIVITY_CHAIN]
            )
            # A racing creator may have added the chain first, so an
            # 'exists' error from -N is not treated as a failure.
            if create.returncode != 0 and 'exists' not in (create.stderr or ''):
                logger.warning(
                    f"_ensure_chains: cannot create {table}/{self.CONNECTIVITY_CHAIN}: "
                    f"{create.stderr.strip()}"
                )
        # Insert jump from parent chain at position 1, idempotent.
        jump_args = ['-t', table, '-C', parent_chain, '-j', self.CONNECTIVITY_CHAIN]
        exists = self._wg_iptables(jump_args)
        if exists.returncode != 0:
            self._wg_iptables(
                ['-t', table, '-I', parent_chain, '1',
                 '-j', self.CONNECTIVITY_CHAIN]
            )
def _flush_chain(self, table: str, chain: str) -> None:
    """Flush a chain in-place (`iptables -F`) without deleting it."""
    self._wg_iptables(['-t', table, '-F', chain])

def _add_ip_rule(self, mark: int, table: int) -> None:
    """Add `ip rule fwmark <mark> lookup <table>`."""
    self._wg_ip(['rule', 'add', 'fwmark', hex(mark), 'lookup', str(table)])

def _remove_ip_rule(self, mark: int, table: int) -> None:
    """Remove all matching `ip rule fwmark <mark> lookup <table>` (idempotent).

    `ip rule del` returns nonzero when no matching rule exists, so we
    loop (bounded at 8 iterations) until it fails, draining duplicates.
    """
    for _ in range(8):
        r = self._wg_ip(['rule', 'del', 'fwmark', hex(mark), 'lookup', str(table)])
        if r.returncode != 0:
            break
def _add_mark_rule(self, src_ip: str, mark: int) -> None:
    """Mark packets from src_ip with mark in the mangle PIC_CONNECTIVITY chain."""
    self._wg_iptables([
        '-t', 'mangle', '-A', self.CONNECTIVITY_CHAIN,
        '-s', src_ip,
        '-j', 'MARK', '--set-mark', hex(mark),
    ])

def _add_tor_redirect(self, src_ip: str) -> None:
    """Redirect peer's TCP traffic to local Tor TransPort.

    NOTE(review): only TCP is redirected here; the peer's UDP/ICMP
    handling for the Tor exit is not visible in this class — verify it
    is dropped or handled elsewhere to avoid leaks.
    """
    self._wg_iptables([
        '-t', 'nat', '-A', self.CONNECTIVITY_CHAIN,
        '-s', src_ip, '-p', 'tcp',
        '-j', 'REDIRECT', '--to-ports', str(self.TOR_TRANS_PORT),
    ])
def _add_killswitch(self, mark: int, iface: Optional[str]) -> None:
    """Drop marked packets that would egress via any interface other than iface.

    For Tor (no exit iface) this is skipped — Tor traffic is fully
    redirected at nat/REDIRECT and never reaches FORWARD.
    """
    if not iface:
        return
    # Use -C to test, -A to add — idempotent.
    check_args = ['-C', 'FORWARD',
                  '-m', 'mark', '--mark', hex(mark),
                  '!', '-o', iface, '-j', 'DROP']
    exists = self._wg_iptables(check_args)
    if exists.returncode != 0:
        self._wg_iptables(['-A', 'FORWARD',
                           '-m', 'mark', '--mark', hex(mark),
                           '!', '-o', iface, '-j', 'DROP'])
def _exit_status(self, exit_type: str) -> Dict[str, Any]:
    """Return per-exit status (config presence + interface up/down)."""
    info: Dict[str, Any] = {'configured': False, 'iface_up': False}
    if exit_type == 'wireguard_ext':
        path = os.path.join(self.wireguard_ext_dir, 'wg_ext0.conf')
        info['configured'] = os.path.isfile(path)
    elif exit_type == 'openvpn':
        try:
            # Any stored .ovpn profile counts as configured.
            info['configured'] = any(
                f.endswith('.ovpn')
                for f in os.listdir(self.openvpn_dir)
            )
        except OSError:
            info['configured'] = False
    elif exit_type == 'tor':
        info['configured'] = True  # Tor uses defaults; no per-cell config
    iface = self.IFACES.get(exit_type)
    if iface:
        try:
            r = self._wg_ip(['link', 'show', iface], timeout=5)
            # 'UP' appears in the flags field of `ip link show` output.
            info['iface_up'] = r.returncode == 0 and 'UP' in (r.stdout or '')
        except Exception:
            info['iface_up'] = False
    return info
def _peer_source_ip(self, peer_name: str) -> Optional[str]:
"""Return a peer's WireGuard IP (no /CIDR suffix)."""
if not peer_name or self.peer_registry is None:
return None
try:
peer = self.peer_registry.get_peer(peer_name)
except Exception as e:
logger.warning(f"_peer_source_ip({peer_name}): {e}")
return None
if not peer:
return None
ip = peer.get('ip', '')
if not ip:
return None
return ip.split('/')[0]
# ── Config validation ─────────────────────────────────────────────────
def _validate_wg_conf(self, text: str) -> str:
"""Strip Pre/Post-Up/Down hooks and reject conflicting wg0 interface.
Raises ValueError if the config tries to define `Interface = wg0`
(which would clash with the existing peer-server interface).
"""
if not isinstance(text, str):
raise ValueError('wg conf must be a string')
cleaned: List[str] = []
for raw_line in text.splitlines():
stripped = raw_line.strip()
# Reject wg0 interface declaration that would conflict with the
# existing WireGuard server interface.
if stripped.lower().startswith('interface'):
# Look ahead in subsequent lines for `= wg0` would be hard;
# the [Interface] section header itself is fine. We only
# reject explicit Name/Interface = wg0 directives.
pass
# Match assignments like `PostUp = ...`
if '=' in stripped:
key = stripped.split('=', 1)[0].strip()
if key in _WG_FORBIDDEN_PREFIXES:
logger.info(f"_validate_wg_conf: dropped {key} hook")
continue
# Detect Name = wg0 or Interface = wg0 inside Interface section
if key.lower() in ('name', 'interface') and \
stripped.split('=', 1)[1].strip().lower() == 'wg0':
raise ValueError(
"config defines interface 'wg0' which conflicts "
"with the peer-server interface"
)
cleaned.append(raw_line)
return '\n'.join(cleaned).rstrip() + '\n'
def _validate_ovpn(self, text: str) -> str:
    """Strip directives that execute external scripts/binaries.

    OpenVPN accepts config-file options with or without a leading
    ``--`` (the prefix is optional in configuration files), so leading
    dashes are removed from the directive token before matching it
    against _OVPN_FORBIDDEN_DIRECTIVES — otherwise ``--up /bin/sh``
    would slip past the filter.

    Args:
        text: Raw .ovpn profile text as uploaded.

    Returns:
        The sanitized profile, newline-terminated.

    Raises:
        ValueError: if ``text`` is not a string.
    """
    if not isinstance(text, str):
        raise ValueError('ovpn conf must be a string')
    cleaned: List[str] = []
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        # Match the directive name (first whitespace-delimited token),
        # normalizing the optional '--' prefix.
        if stripped and not stripped.startswith('#'):
            first = stripped.split(None, 1)[0].lstrip('-')
            if first in _OVPN_FORBIDDEN_DIRECTIVES:
                logger.info(f"_validate_ovpn: dropped {first} directive")
                continue
        cleaned.append(raw_line)
    return '\n'.join(cleaned).rstrip() + '\n'
# ── Filesystem helpers ────────────────────────────────────────────────
@staticmethod
def _write_secure(path: str, text: str) -> None:
    """Atomic 0o600 write — secrets in these configs must not be world-readable.

    Writes to a same-directory '.tmp' file first, then os.replace()s it
    into place so a crash never leaves a partially-written config at
    `path`.
    """
    os.makedirs(os.path.dirname(path), exist_ok=True)
    tmp = path + '.tmp'
    # O_CREAT with mode 0o600 means the file is never momentarily
    # readable by other users (umask may restrict further, never widen).
    fd = os.open(tmp, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(text)
    except Exception:
        # Best-effort cleanup of the temp file before re-raising.
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise
    # Re-assert permissions in case the temp file pre-existed with a
    # wider mode (os.open mode only applies on creation).
    os.chmod(tmp, 0o600)
    os.replace(tmp, path)
    os.chmod(path, 0o600)
+486
View File
@@ -0,0 +1,486 @@
#!/usr/bin/env python3
"""
DDNS Manager for Personal Internet Cell.
Provides a provider-agnostic adapter for Dynamic DNS services used to keep the
cell's public IP registered under its chosen domain.
Supported providers:
pic_ngo pic.ngo DDNS service (primary / Phase 3 wiring)
cloudflare Cloudflare API v4 (stub; full impl in Phase 3b)
duckdns DuckDNS (stub; no DNS-01 support)
noip No-IP (stub)
freedns FreeDNS (stub)
The manager runs a background heartbeat thread that re-publishes the public IP
every 5 minutes, skipping the call when the IP has not changed.
"""
import logging
import threading
import time
from typing import Any, Dict, Optional
import requests
from base_service_manager import BaseServiceManager
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Custom exception
# ---------------------------------------------------------------------------
class DDNSError(Exception):
    """Raised when a DDNS provider returns an error response (or no provider is configured)."""
# ---------------------------------------------------------------------------
# Provider base class
# ---------------------------------------------------------------------------
class DDNSProvider:
    """Base class — all providers implement these methods."""

    def register(self, name: str, ip: str) -> dict:
        """Register subdomain. Returns {'token': str, 'subdomain': str}."""
        raise NotImplementedError

    def update(self, token: str, ip: str) -> bool:
        """Update A record. Returns True on success."""
        raise NotImplementedError

    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        """Create a DNS-01 TXT record at `fqdn` with `value`. True on success."""
        raise NotImplementedError

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        """Delete the DNS-01 TXT record at `fqdn`. True on success."""
        raise NotImplementedError
# ---------------------------------------------------------------------------
# pic.ngo provider
# ---------------------------------------------------------------------------
class PicNgoDDNS(DDNSProvider):
    """DDNS provider backed by the roof/pic-ddns API at ddns.pic.ngo."""

    DEFAULT_API_BASE = 'https://ddns.pic.ngo'
    TIMEOUT = 10  # seconds, per HTTP request

    def __init__(self, api_base_url: Optional[str] = None):
        """Args: api_base_url: API root override; defaults to DEFAULT_API_BASE."""
        self.api_base_url = (api_base_url or self.DEFAULT_API_BASE).rstrip('/')

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------
    def _headers(self, token: Optional[str] = None) -> Dict[str, str]:
        """JSON headers, plus Bearer auth when a token is supplied."""
        h: Dict[str, str] = {'Content-Type': 'application/json'}
        if token:
            h['Authorization'] = f'Bearer {token}'
        return h

    def _raise_for_status(self, response: requests.Response, action: str):
        """Raise DDNSError for any non-2xx response.

        The message includes both the HTTP status and the response body,
        separated by ': ' — previously they were concatenated with no
        separator, producing unreadable messages like 'HTTP 400{"err"…}'.
        """
        if not response.ok:
            raise DDNSError(
                f"PicNgoDDNS {action} failed: HTTP {response.status_code}: {response.text}"
            )

    # ------------------------------------------------------------------
    # Public interface
    # ------------------------------------------------------------------
    def register(self, name: str, ip: str) -> dict:
        """POST /api/v1/register — register subdomain, returns token + subdomain."""
        url = f'{self.api_base_url}/api/v1/register'
        payload = {'name': name, 'ip': ip}
        resp = requests.post(url, json=payload, headers=self._headers(), timeout=self.TIMEOUT)
        self._raise_for_status(resp, 'register')
        return resp.json()

    def update(self, token: str, ip: str) -> bool:
        """PUT /api/v1/update — update A record."""
        url = f'{self.api_base_url}/api/v1/update'
        payload = {'ip': ip}
        resp = requests.put(url, json=payload,
                            headers=self._headers(token), timeout=self.TIMEOUT)
        self._raise_for_status(resp, 'update')
        return True

    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        """POST /api/v1/dns-challenge — create DNS-01 TXT record."""
        url = f'{self.api_base_url}/api/v1/dns-challenge'
        payload = {'fqdn': fqdn, 'value': value}
        resp = requests.post(url, json=payload,
                             headers=self._headers(token), timeout=self.TIMEOUT)
        self._raise_for_status(resp, 'dns_challenge_create')
        return True

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        """DELETE /api/v1/dns-challenge — remove DNS-01 TXT record."""
        url = f'{self.api_base_url}/api/v1/dns-challenge'
        payload = {'fqdn': fqdn}
        resp = requests.delete(url, json=payload,
                               headers=self._headers(token), timeout=self.TIMEOUT)
        self._raise_for_status(resp, 'dns_challenge_delete')
        return True
# ---------------------------------------------------------------------------
# Cloudflare provider (stub)
# ---------------------------------------------------------------------------
class CloudflareDDNS(DDNSProvider):
    """DDNS via Cloudflare API v4. Stub — full impl in Phase 3b."""

    API_BASE = 'https://api.cloudflare.com/client/v4'
    TIMEOUT = 10  # seconds, per HTTP request

    def __init__(self, api_token: str, zone_id: str):
        # Token is sent as a Bearer credential; zone_id selects the DNS zone.
        self.api_token = api_token
        self.zone_id = zone_id

    def _headers(self) -> Dict[str, str]:
        """Bearer-token auth headers for the Cloudflare v4 API."""
        return {
            'Authorization': f'Bearer {self.api_token}',
            'Content-Type': 'application/json',
        }

    def register(self, name: str, ip: str) -> dict:
        # Cloudflare doesn't have a registration step — return stub data.
        return {'token': self.api_token, 'subdomain': name}

    def update(self, token: str, ip: str) -> bool:
        """PATCH /zones/{zone_id}/dns_records — update A record.

        NOTE(review): stub — Cloudflare's API requires the DNS record ID
        in the URL and a {'type','name','content'} payload; PATCHing the
        collection URL with {'ip': …} will not update a record. Confirm
        before relying on this provider.
        """
        url = f'{self.API_BASE}/zones/{self.zone_id}/dns_records'
        resp = requests.patch(url, json={'ip': ip}, headers=self._headers(),
                              timeout=self.TIMEOUT)
        return resp.ok

    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        """POST TXT record for DNS-01 challenge (ttl 120s)."""
        url = f'{self.API_BASE}/zones/{self.zone_id}/dns_records'
        payload = {'type': 'TXT', 'name': fqdn, 'content': value, 'ttl': 120}
        resp = requests.post(url, json=payload, headers=self._headers(),
                             timeout=self.TIMEOUT)
        return resp.ok

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        """DELETE TXT record for DNS-01 challenge."""
        # A real impl would look up the record ID first; stub returns True.
        return True
# ---------------------------------------------------------------------------
# DuckDNS provider (stub)
# ---------------------------------------------------------------------------
class DuckDNSDDNS(DDNSProvider):
    """DDNS via DuckDNS. Stub — DNS-01 challenge not supported."""

    UPDATE_URL = 'https://www.duckdns.org/update'
    TIMEOUT = 10  # seconds, per HTTP request

    def __init__(self, token: str, domain: str):
        # DuckDNS account token and the registered subdomain to update.
        self._token = token
        self._domain = domain

    def register(self, name: str, ip: str) -> dict:
        """DuckDNS has no registration API; echo back the stored token."""
        return {'token': self._token, 'subdomain': name}

    def update(self, token: str, ip: str) -> bool:
        """GET the DuckDNS update endpoint; DuckDNS replies 'OK' on success."""
        params = {'domains': self._domain, 'token': token, 'ip': ip}
        resp = requests.get(self.UPDATE_URL, params=params, timeout=self.TIMEOUT)
        return resp.ok and resp.text.strip() == 'OK'

    # NOTE(review): DuckDNS's update endpoint appears to accept a 'txt='
    # parameter for TXT records — TXT support could likely be added; verify.
    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        raise NotImplementedError("DuckDNS does not support programmatic TXT record creation")

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        raise NotImplementedError("DuckDNS does not support programmatic TXT record deletion")
# ---------------------------------------------------------------------------
# No-IP provider (stub)
# ---------------------------------------------------------------------------
class NoIPDDNS(DDNSProvider):
    """DDNS via No-IP. Stub — DNS-01 not supported; all methods unimplemented."""

    def register(self, name: str, ip: str) -> dict:
        raise NotImplementedError

    def update(self, token: str, ip: str) -> bool:
        raise NotImplementedError

    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        raise NotImplementedError

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        raise NotImplementedError


# ---------------------------------------------------------------------------
# FreeDNS provider (stub)
# ---------------------------------------------------------------------------
class FreeDNSDDNS(DDNSProvider):
    """DDNS via FreeDNS. Stub — DNS-01 not supported; all methods unimplemented."""

    def register(self, name: str, ip: str) -> dict:
        raise NotImplementedError

    def update(self, token: str, ip: str) -> bool:
        raise NotImplementedError

    def dns_challenge_create(self, token: str, fqdn: str, value: str) -> bool:
        raise NotImplementedError

    def dns_challenge_delete(self, token: str, fqdn: str) -> bool:
        raise NotImplementedError
# ---------------------------------------------------------------------------
# Public IP helper
# ---------------------------------------------------------------------------
def _get_public_ip() -> Optional[str]:
    """Return the current public IPv4 address using ipify, or None on failure."""
    try:
        response = requests.get('https://api.ipify.org', timeout=10)
        if response.ok:
            return response.text.strip()
    except Exception as exc:
        logger.warning("Could not determine public IP: %s", exc)
    # Non-2xx response or network error: caller treats None as "unknown".
    return None
# ---------------------------------------------------------------------------
# Manager
# ---------------------------------------------------------------------------
# Period of the background IP-refresh heartbeat thread, in seconds.
_HEARTBEAT_INTERVAL = 300  # 5 minutes
class DDNSManager(BaseServiceManager):
    """Manages DDNS registration and periodic IP updates."""

    def __init__(self, config_manager=None,
                 data_dir: str = '/app/data',
                 config_dir: str = '/app/config'):
        """Create the manager.

        Args:
            config_manager: Config store holding the cell identity
                (domain + ddns provider settings); optional.
            data_dir: Base data directory passed to BaseServiceManager.
            config_dir: Base config directory passed to BaseServiceManager.
        """
        super().__init__('ddns', data_dir, config_dir)
        self.config_manager = config_manager
        # Last public IP successfully published (None until first update).
        self._last_ip: Optional[str] = None
        # Signals the heartbeat thread to exit.
        self._stop_event = threading.Event()
        self._heartbeat_thread: Optional[threading.Thread] = None

    # ------------------------------------------------------------------
    # BaseServiceManager abstract method implementations
    # ------------------------------------------------------------------
    def get_status(self) -> Dict[str, Any]:
        """Return provider name, last published IP and heartbeat liveness."""
        identity = self._identity()
        domain_cfg = identity.get('domain', {})
        return {
            'service': 'ddns',
            'provider': domain_cfg.get('ddns', {}).get('provider') if domain_cfg else None,
            'last_ip': self._last_ip,
            'heartbeat_running': (
                self._heartbeat_thread is not None and
                self._heartbeat_thread.is_alive()
            ),
        }

    def test_connectivity(self) -> Dict[str, Any]:
        """Check that a provider is configured and the public IP is reachable."""
        provider = self.get_provider()
        if provider is None:
            return {'success': False, 'reason': 'No DDNS provider configured'}
        ip = _get_public_ip()
        if ip is None:
            return {'success': False, 'reason': 'Could not reach ipify'}
        return {'success': True, 'public_ip': ip}

    # ------------------------------------------------------------------
    # Identity helpers
    # ------------------------------------------------------------------
    def _identity(self) -> Dict[str, Any]:
        """Return the cell identity dict from config_manager ({} if unavailable)."""
        if self.config_manager is None:
            return {}
        return self.config_manager.get_identity() or {}

    # ------------------------------------------------------------------
    # Provider factory
    # ------------------------------------------------------------------
    def get_provider(self) -> Optional[DDNSProvider]:
        """Instantiate and return the configured DDNS provider, or None.

        Reads identity['domain']['ddns'] and dispatches on its 'provider'
        field; an unknown provider name is logged and treated as
        unconfigured (returns None).
        """
        identity = self._identity()
        domain_cfg = identity.get('domain', {})
        if not domain_cfg:
            return None
        ddns_cfg = domain_cfg.get('ddns', {})
        if not ddns_cfg:
            return None
        provider_name = ddns_cfg.get('provider')
        if not provider_name:
            return None
        if provider_name == 'pic_ngo':
            # api_base_url override is optional (defaults to ddns.pic.ngo).
            api_base = ddns_cfg.get('api_base_url')
            return PicNgoDDNS(api_base_url=api_base)
        if provider_name == 'cloudflare':
            return CloudflareDDNS(
                api_token=ddns_cfg.get('api_token', ''),
                zone_id=ddns_cfg.get('zone_id', ''),
            )
        if provider_name == 'duckdns':
            return DuckDNSDDNS(
                token=ddns_cfg.get('token', ''),
                domain=ddns_cfg.get('domain', ''),
            )
        if provider_name == 'noip':
            return NoIPDDNS()
        if provider_name == 'freedns':
            return FreeDNSDDNS()
        logger.warning("Unknown DDNS provider: %s", provider_name)
        return None

    # ------------------------------------------------------------------
    # Registration
    # ------------------------------------------------------------------
    def register(self, name: str, ip: str) -> dict:
        """Register the cell's subdomain with the configured provider.

        Stores the returned token in the identity config under
        identity['domain']['ddns']['token'] and records the subdomain.
        Returns the dict from provider.register().

        Raises:
            DDNSError: if no provider is configured (provider.register()
                may also raise, e.g. PicNgoDDNS on a non-2xx response).
        """
        provider = self.get_provider()
        if provider is None:
            raise DDNSError("No DDNS provider configured")
        result = provider.register(name, ip)
        # Persist token + subdomain back into identity
        identity = self._identity()
        domain_cfg = dict(identity.get('domain', {}))
        ddns_cfg = dict(domain_cfg.get('ddns', {}))
        if 'token' in result:
            ddns_cfg['token'] = result['token']
        if 'subdomain' in result:
            ddns_cfg['subdomain'] = result['subdomain']
        domain_cfg['ddns'] = ddns_cfg
        if self.config_manager is not None:
            self.config_manager.set_identity_field('domain', domain_cfg)
        self._last_ip = ip
        return result

    # ------------------------------------------------------------------
    # IP update
    # ------------------------------------------------------------------
    def update_ip(self):
        """Fetch current public IP and update DDNS only if it has changed."""
        provider = self.get_provider()
        if provider is None:
            logger.debug("DDNS update_ip: no provider configured, skipping")
            return
        current_ip = _get_public_ip()
        if current_ip is None:
            logger.warning("DDNS update_ip: could not determine public IP")
            return
        if current_ip == self._last_ip:
            logger.debug("DDNS update_ip: IP unchanged (%s), skipping", current_ip)
            return
        identity = self._identity()
        domain_cfg = identity.get('domain', {})
        ddns_cfg = domain_cfg.get('ddns', {}) if domain_cfg else {}
        token = ddns_cfg.get('token', '')
        try:
            success = provider.update(token, current_ip)
            if success:
                logger.info("DDNS update_ip: updated to %s", current_ip)
                # Only remember the IP after a confirmed update, so a failed
                # attempt is retried on the next heartbeat.
                self._last_ip = current_ip
            else:
                logger.warning("DDNS update_ip: provider.update() returned False")
        except DDNSError as exc:
            logger.error("DDNS update_ip: provider error: %s", exc)

    # ------------------------------------------------------------------
    # Heartbeat
    # ------------------------------------------------------------------
    def start_heartbeat(self):
        """Start a daemon thread that calls update_ip() every 5 minutes."""
        if self._heartbeat_thread is not None and self._heartbeat_thread.is_alive():
            logger.debug("DDNS heartbeat already running")
            return
        self._stop_event.clear()
        self._heartbeat_thread = threading.Thread(
            target=self._heartbeat_loop,
            name='ddns-heartbeat',
            daemon=True,
        )
        self._heartbeat_thread.start()
        logger.info("DDNS heartbeat thread started (interval=%ds)", _HEARTBEAT_INTERVAL)

    def stop_heartbeat(self):
        """Signal the heartbeat thread to stop and wait for it to exit."""
        self._stop_event.set()
        if self._heartbeat_thread is not None:
            # join() with a timeout so a wedged update can't hang shutdown.
            self._heartbeat_thread.join(timeout=10)
            self._heartbeat_thread = None

    def _heartbeat_loop(self):
        """Internal: run update_ip() periodically until _stop_event is set."""
        while not self._stop_event.is_set():
            try:
                self.update_ip()
            except Exception as exc:
                logger.warning("DDNS heartbeat: unexpected error: %s", exc)
            # Sleep in short slices so stop_heartbeat() is responsive
            for _ in range(_HEARTBEAT_INTERVAL):
                if self._stop_event.is_set():
                    break
                time.sleep(1)

    # ------------------------------------------------------------------
    # DNS challenge delegation
    # ------------------------------------------------------------------
    def dns_challenge_create(self, fqdn: str, value: str) -> bool:
        """Create a DNS-01 TXT record via the configured provider.

        Raises:
            DDNSError: if no provider is configured.
        """
        provider = self.get_provider()
        if provider is None:
            raise DDNSError("No DDNS provider configured")
        identity = self._identity()
        domain_cfg = identity.get('domain', {})
        ddns_cfg = domain_cfg.get('ddns', {}) if domain_cfg else {}
        token = ddns_cfg.get('token', '')
        return provider.dns_challenge_create(token, fqdn, value)

    def dns_challenge_delete(self, fqdn: str) -> bool:
        """Delete a DNS-01 TXT record via the configured provider.

        Raises:
            DDNSError: if no provider is configured.
        """
        provider = self.get_provider()
        if provider is None:
            raise DDNSError("No DDNS provider configured")
        identity = self._identity()
        domain_cfg = identity.get('domain', {})
        ddns_cfg = domain_cfg.get('ddns', {}) if domain_cfg else {}
        token = ddns_cfg.get('token', '')
        return provider.dns_challenge_delete(token, fqdn)
+197 -38
View File
@@ -8,10 +8,13 @@ import os
import subprocess
import logging
import re
import threading
from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
_forward_stateful_lock = threading.Lock()
# Virtual IPs assigned to Caddy per service — must match Caddyfile listeners.
# Populated at import time from the default subnet; call update_service_ips()
# whenever ip_range changes so all downstream callers see the new values.
@@ -325,6 +328,22 @@ def _get_dns_container_ip() -> str:
return '172.20.0.3'
def _get_wg_server_ip() -> Optional[str]:
"""Return the WireGuard server's VPN IP from wg0.conf (e.g. '10.0.0.1')."""
import ipaddress as _ipaddress
wg_conf_path = '/app/config/wireguard/wg_confs/wg0.conf'
try:
with open(wg_conf_path) as f:
for line in f:
line = line.strip()
if line.startswith('Address') and '=' in line:
addr = line.split('=', 1)[1].strip()
return str(_ipaddress.ip_interface(addr).ip)
except Exception:
pass
return None
def _get_caddy_container_ip() -> str:
"""Return cell-caddy container's Docker bridge IP. Falls back to 172.20.0.2."""
try:
@@ -431,38 +450,48 @@ def apply_all_cell_rules(cell_links: List[Dict[str, Any]]) -> None:
def ensure_forward_stateful() -> bool:
    """Ensure an ESTABLISHED,RELATED ACCEPT rule sits at position 1 of FORWARD.

    Cell rules DROP all traffic from a connected cell's subnet except
    specific service ports. Without conntrack, ICMP replies and TCP ACKs
    for connections initiated BY local peers toward the connected cell are
    also dropped, breaking cross-cell routing (peer -> cell -> remote cell).

    Every existing instance of the rule is deleted and it is re-inserted at
    position 1. That re-anchoring is necessary because wg0 PostUp uses
    ``-I FORWARD`` (insert at top), which pushes this rule down every time
    wg0 restarts, causing ICMP to hit the per-peer DROP rule before reaching
    the stateful ACCEPT. The delete/insert sequence is guarded by a module
    lock so concurrent callers cannot interleave.

    NOTE(review): this span of the file contained the pre-change and
    post-change implementations overlaid by a diff dump; this is the
    reconstructed post-change version.
    """
    with _forward_stateful_lock:
        try:
            # Remove all existing instances so we can re-anchor at position 1.
            # PostUp -I FORWARD rules drift this rule down on every wg0 restart.
            while _wg_exec(['iptables', '-D', 'FORWARD', '-m', 'state',
                            '--state', 'ESTABLISHED,RELATED', '-j', 'ACCEPT']).returncode == 0:
                pass
            _wg_exec(['iptables', '-I', 'FORWARD', '1', '-m', 'state',
                      '--state', 'ESTABLISHED,RELATED', '-j', 'ACCEPT'])
            logger.info('ensure_forward_stateful: ESTABLISHED,RELATED anchored at FORWARD position 1')
            return True
        except Exception as e:
            logger.error(f'ensure_forward_stateful: {e}')
            return False
def ensure_cell_api_dnat() -> bool:
"""DNAT wg0:3000 → cell-api:3000 inside cell-wireguard.
"""DNAT wg0:3000 (scoped to WG server IP) → cell-api:3000 inside cell-wireguard.
Remote cells push permission updates over the WireGuard tunnel to our
wg0 interface on port 3000. Since cell-api only listens on the Docker
bridge, we need a DNAT rule inside cell-wireguard's namespace to forward
that traffic. Called on every startup so rules survive container restarts.
wg0 interface on port 3000. The DNAT is scoped to -d {server_ip} so that
cross-cell traffic destined for another cell's API (different WG IP) is
not intercepted. Called on every startup so rules survive container restarts.
"""
try:
server_ip = _get_wg_server_ip()
if not server_ip:
logger.warning('ensure_cell_api_dnat: could not determine WG server IP')
return False
r = _run(['docker', 'inspect', '--format',
'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}',
'cell-api'], check=False)
@@ -471,10 +500,12 @@ def ensure_cell_api_dnat() -> bool:
logger.warning('ensure_cell_api_dnat: cell-api container not found or no IP')
return False
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-p', 'tcp',
'--dport', '3000', '-j', 'DNAT', '--to-destination', f'{api_ip}:3000']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-p', 'tcp',
'--dport', '3000', '-j', 'DNAT', '--to-destination', f'{api_ip}:3000']
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', 'tcp', '--dport', '3000',
'-j', 'DNAT', '--to-destination', f'{api_ip}:3000']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', 'tcp', '--dport', '3000',
'-j', 'DNAT', '--to-destination', f'{api_ip}:3000']
if _wg_exec(['iptables'] + dnat_check).returncode != 0:
_wg_exec(['iptables'] + dnat_add)
@@ -500,21 +531,27 @@ def ensure_cell_api_dnat() -> bool:
def ensure_dns_dnat() -> bool:
"""DNAT wg0:53 (UDP+TCP) → cell-dns:53 so VPN peers use the WG server IP for DNS.
"""DNAT wg0:53 (scoped to WG server IP) → cell-dns:53.
Peers are configured with DNS = <wg_server_ip>. Their DNS queries arrive on
wg0:53 and must be forwarded to cell-dns inside the Docker bridge.
Peers send DNS queries to the WG server IP. DNAT is scoped with -d {server_ip}
so cross-cell DNS traffic destined for another cell is forwarded, not hijacked.
"""
try:
server_ip = _get_wg_server_ip()
if not server_ip:
logger.warning('ensure_dns_dnat: could not determine WG server IP')
return False
dns_ip = _get_dns_container_ip()
if not dns_ip:
logger.warning('ensure_dns_dnat: cell-dns not found')
return False
for proto in ('udp', 'tcp'):
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-p', proto,
'--dport', '53', '-j', 'DNAT', '--to-destination', f'{dns_ip}:53']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-p', proto,
'--dport', '53', '-j', 'DNAT', '--to-destination', f'{dns_ip}:53']
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', proto, '--dport', '53',
'-j', 'DNAT', '--to-destination', f'{dns_ip}:53']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', proto, '--dport', '53',
'-j', 'DNAT', '--to-destination', f'{dns_ip}:53']
if _wg_exec(['iptables'] + dnat_check).returncode != 0:
_wg_exec(['iptables'] + dnat_add)
for proto in ('udp', 'tcp'):
@@ -524,7 +561,7 @@ def ensure_dns_dnat() -> bool:
'-p', proto, '--dport', '53', '-j', 'ACCEPT']
if _wg_exec(['iptables'] + fwd_check).returncode != 0:
_wg_exec(['iptables'] + fwd_add)
logger.info(f'ensure_dns_dnat: wg0:53 → {dns_ip}:53')
logger.info(f'ensure_dns_dnat: wg0:{server_ip}:53 → {dns_ip}:53')
return True
except Exception as e:
logger.error(f'ensure_dns_dnat: {e}')
@@ -532,20 +569,26 @@ def ensure_dns_dnat() -> bool:
def ensure_service_dnat() -> bool:
"""DNAT wg0:80 → cell-caddy:80 so VPN peers reach services via Host-header routing.
"""DNAT wg0:80 (scoped to WG server IP) → cell-caddy:80.
All service DNS names resolve to the WG server IP. Traffic to wg0:80 is
forwarded to Caddy, which routes to the correct backend by Host header.
Service DNS names resolve to the WG server IP. DNAT is scoped with -d {server_ip}
so that cross-cell HTTP traffic destined for another cell passes through unmodified.
"""
try:
server_ip = _get_wg_server_ip()
if not server_ip:
logger.warning('ensure_service_dnat: could not determine WG server IP')
return False
caddy_ip = _get_caddy_container_ip()
if not caddy_ip:
logger.warning('ensure_service_dnat: cell-caddy not found')
return False
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-p', 'tcp',
'--dport', '80', '-j', 'DNAT', '--to-destination', f'{caddy_ip}:80']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-p', 'tcp',
'--dport', '80', '-j', 'DNAT', '--to-destination', f'{caddy_ip}:80']
dnat_check = ['-t', 'nat', '-C', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', 'tcp', '--dport', '80',
'-j', 'DNAT', '--to-destination', f'{caddy_ip}:80']
dnat_add = ['-t', 'nat', '-A', 'PREROUTING', '-i', 'wg0', '-d', server_ip,
'-p', 'tcp', '--dport', '80',
'-j', 'DNAT', '--to-destination', f'{caddy_ip}:80']
if _wg_exec(['iptables'] + dnat_check).returncode != 0:
_wg_exec(['iptables'] + dnat_add)
fwd_check = ['-C', 'FORWARD', '-i', 'wg0', '-o', 'eth0',
@@ -554,13 +597,80 @@ def ensure_service_dnat() -> bool:
'-p', 'tcp', '--dport', '80', '-j', 'ACCEPT']
if _wg_exec(['iptables'] + fwd_check).returncode != 0:
_wg_exec(['iptables'] + fwd_add)
logger.info(f'ensure_service_dnat: wg0:80 → {caddy_ip}:80')
logger.info(f'ensure_service_dnat: wg0:{server_ip}:80 → {caddy_ip}:80')
return True
except Exception as e:
logger.error(f'ensure_service_dnat: {e}')
return False
def ensure_wg_masquerade() -> bool:
    """MASQUERADE Docker-bridge traffic leaving via wg0 and allow it to FORWARD.

    cell-dns and other Docker containers reach remote cell subnets through
    cell-wireguard's wg0. Without MASQUERADE the 172.20.x.x source address
    cannot be routed back over the WireGuard tunnel (WireGuard only accepts
    10.0.x.x sources from peers); rewriting the source to wg0's IP lets
    replies return. Both rules are idempotent: each is probed with ``-C``
    and only added when missing.
    """
    bridge_subnet = '172.20.0.0/16'
    try:
        # (check-args, add-args) pairs; -C probes for an existing rule.
        rule_pairs = (
            (['-t', 'nat', '-C', 'POSTROUTING', '-o', 'wg0',
              '-s', bridge_subnet, '-j', 'MASQUERADE'],
             ['-t', 'nat', '-A', 'POSTROUTING', '-o', 'wg0',
              '-s', bridge_subnet, '-j', 'MASQUERADE']),
            (['-C', 'FORWARD', '-i', 'eth0', '-o', 'wg0',
              '-s', bridge_subnet, '-j', 'ACCEPT'],
             ['-I', 'FORWARD', '-i', 'eth0', '-o', 'wg0',
              '-s', bridge_subnet, '-j', 'ACCEPT']),
        )
        for check_args, add_args in rule_pairs:
            if _wg_exec(['iptables'] + check_args).returncode != 0:
                _wg_exec(['iptables'] + add_args)
        logger.info('ensure_wg_masquerade: Docker→wg0 MASQUERADE+FORWARD configured')
        return True
    except Exception as e:
        logger.error(f'ensure_wg_masquerade: {e}')
        return False
def ensure_cell_subnet_routes(cell_links: List[Dict[str, Any]]) -> None:
    """Install host-namespace routes for remote cell VPN subnets.

    Docker containers (cell-dns, etc.) use the host's routing table for
    non-bridge destinations; without an explicit route, packets for a
    connected cell's 10.0.x.0/24 subnet go to the host's default gateway
    instead of cell-wireguard. A throwaway ``--network host --rm``
    container runs ``ip route replace`` inside the host network namespace —
    cell-api has docker.sock, so this needs no privileged mode or nsenter.
    Failures are logged per subnet and never raised.
    """
    if not cell_links:
        return
    # cell-wireguard's fixed Docker bridge IP (see docker-compose.yml).
    wg_bridge_ip = '172.20.0.9'
    for link in cell_links:
        subnet = link.get('vpn_subnet', '')
        if not subnet:
            continue
        try:
            proc = _run(
                ['docker', 'run', '--rm',
                 '--network', 'host',
                 '--cap-add', 'NET_ADMIN',
                 'alpine',
                 'ip', 'route', 'replace', subnet, 'via', wg_bridge_ip],
                check=False
            )
            if proc.returncode == 0:
                logger.info(f'ensure_cell_subnet_routes: {subnet} via {wg_bridge_ip}')
            else:
                logger.warning(
                    f'ensure_cell_subnet_routes: {subnet} failed: {proc.stderr.strip()}'
                )
        except Exception as e:
            logger.warning(f'ensure_cell_subnet_routes: {subnet}: {e}')
# ---------------------------------------------------------------------------
# DNS ACL (CoreDNS Corefile generation)
# ---------------------------------------------------------------------------
@@ -694,3 +804,52 @@ def apply_all_dns_rules(peers: List[Dict[str, Any]], corefile_path: str = COREFI
if ok:
reload_coredns()
return ok
# ---------------------------------------------------------------------------
# Service store firewall rules
# ---------------------------------------------------------------------------
def _service_tag(service_id: str) -> str:
safe = re.sub(r'[^a-z0-9]', '-', service_id.lower())
return f'pic-svc-{safe}'
def apply_service_rules(service_id: str, service_ip: str, rules: list) -> bool:
    """Insert manifest-declared FORWARD ACCEPT rules for an installed service.

    Existing rules tagged for this service are cleared first so
    re-applying never stacks duplicates. One ACCEPT rule is inserted per
    manifest entry (non-ACCEPT entries are skipped; the manifest
    validator only permits ACCEPT anyway). The ``${SERVICE_IP}``
    placeholder in dest_ip is substituted with the service's allocated
    IP, and every rule carries the service's comment tag so
    clear_service_rules() can remove it later.
    """
    tag = _service_tag(service_id)
    clear_service_rules(service_id)
    for rule in rules:
        if rule.get('type') != 'ACCEPT':
            continue
        destination = rule['dest_ip'].replace('${SERVICE_IP}', service_ip)
        port = str(rule['dest_port'])
        protocol = rule.get('proto', 'tcp')
        _iptables(['-I', 'FORWARD',
                   '-d', destination, '-p', protocol, '--dport', port,
                   '-m', 'comment', '--comment', tag,
                   '-j', 'ACCEPT'])
    return True
def clear_service_rules(service_id: str) -> None:
    """Remove every iptables rule tagged for this service via save/restore.

    Rules are matched by the ``pic-svc-<id>`` comment tag that
    apply_service_rules() attaches. The whole ruleset is dumped with
    ``iptables-save``, lines carrying the tag are filtered out, and the
    result is fed back through ``iptables-restore`` inside the WireGuard
    container — replacing the tables in one pass instead of deleting
    rules one by one. Errors are logged, never raised (best-effort
    cleanup).
    """
    tag = _service_tag(service_id)
    # Matches --comment pic-svc-x, --comment "pic-svc-x" or --comment 'pic-svc-x'.
    # The trailing (\s|$) stops tag 'pic-svc-foo' matching 'pic-svc-foo-bar'.
    comment_re = re.compile(rf'--comment\s+["\']?{re.escape(tag)}["\']?(\s|$)')
    try:
        save = _wg_exec(['iptables-save'])
        if save.returncode != 0:
            return
        lines = save.stdout.splitlines()
        filtered = [l for l in lines if not comment_re.search(l)]
        if len(filtered) == len(lines):
            # Nothing tagged for this service — skip the restore entirely.
            return
        restore_input = '\n'.join(filtered) + '\n'
        # Feed the filtered dump to iptables-restore on stdin inside the
        # WireGuard container; this swaps in the new ruleset in one shot.
        restore = subprocess.run(
            ['docker', 'exec', '-i', WIREGUARD_CONTAINER, 'iptables-restore'],
            input=restore_input, capture_output=True, text=True, timeout=10
        )
        if restore.returncode != 0:
            logger.warning(f'clear_service_rules iptables-restore failed: {restore.stderr.strip()}')
    except Exception as e:
        logger.error(f'clear_service_rules({service_id}): {e}')
+24 -1
View File
@@ -27,6 +27,10 @@ from log_manager import LogManager
from cell_link_manager import CellLinkManager
import firewall_manager
from auth_manager import AuthManager
from setup_manager import SetupManager
from caddy_manager import CaddyManager
from ddns_manager import DDNSManager
from connectivity_manager import ConnectivityManager
DATA_DIR = os.environ.get('DATA_DIR', '/app/data')
CONFIG_DIR = os.environ.get('CONFIG_DIR', '/app/config')
@@ -53,6 +57,24 @@ cell_link_manager = CellLinkManager(
network_manager=network_manager,
)
auth_manager = AuthManager(data_dir=DATA_DIR, config_dir=CONFIG_DIR)
setup_manager = SetupManager(config_manager=config_manager, auth_manager=auth_manager)
caddy_manager = CaddyManager(config_manager=config_manager, data_dir=DATA_DIR, config_dir=CONFIG_DIR)
ddns_manager = DDNSManager(config_manager=config_manager, data_dir=DATA_DIR, config_dir=CONFIG_DIR)
connectivity_manager = ConnectivityManager(
config_manager=config_manager,
peer_registry=peer_registry,
data_dir=DATA_DIR,
config_dir=CONFIG_DIR,
)
from service_store_manager import ServiceStoreManager
service_store_manager = ServiceStoreManager(
config_manager=config_manager,
caddy_manager=caddy_manager,
container_manager=container_manager,
data_dir=DATA_DIR,
config_dir=CONFIG_DIR,
)
# Service logger configuration
_service_log_configs = {
@@ -86,7 +108,8 @@ __all__ = [
'network_manager', 'wireguard_manager', 'peer_registry',
'email_manager', 'calendar_manager', 'file_manager',
'routing_manager', 'vault_manager', 'container_manager',
'cell_link_manager', 'auth_manager',
'cell_link_manager', 'auth_manager', 'setup_manager', 'caddy_manager',
'ddns_manager', 'service_store_manager', 'connectivity_manager',
'firewall_manager', 'EventType',
'DATA_DIR', 'CONFIG_DIR',
]
+30
View File
@@ -194,11 +194,15 @@ class PeerRegistry(BaseServiceManager):
self.logger.error(f"Error loading peers: {e}")
self.peers = []
# Phase 3 migration: per-peer internet routing
# Phase 5 migration: per-peer extended-connectivity exit (wireguard_ext, openvpn, tor)
changed = False
for peer in self.peers:
if 'route_via' not in peer:
peer['route_via'] = None
changed = True
if 'exit_via' not in peer:
peer['exit_via'] = 'default'
changed = True
if changed:
self._save_peers()
else:
@@ -346,6 +350,32 @@ class PeerRegistry(BaseServiceManager):
return dict(peer)
raise ValueError(f"Peer '{peer_name}' not found")
# Phase 5: extended connectivity per-peer egress exit
VALID_EXIT_VIA = ('default', 'wireguard_ext', 'openvpn', 'tor')
def set_peer_exit_via(self, peer_name: str, exit_type: str) -> bool:
    """Set a peer's egress exit type and persist the registry.

    Valid types are listed in ``VALID_EXIT_VIA`` ('default',
    'wireguard_ext', 'openvpn', 'tor'). Returns True when the peer was
    updated and saved; False when the exit type is invalid or the peer
    does not exist — both outcomes are logged as warnings rather than
    raised.
    """
    if exit_type not in self.VALID_EXIT_VIA:
        self.logger.warning(
            f"set_peer_exit_via: invalid exit_type {exit_type!r}"
        )
        return False
    with self.lock:
        target = next(
            (p for p in self.peers if p.get('peer') == peer_name), None
        )
        if target is None:
            self.logger.warning(
                f"set_peer_exit_via: peer {peer_name!r} not found"
            )
            return False
        target['exit_via'] = exit_type
        target['updated_at'] = datetime.utcnow().isoformat()
        self._save_peers()
        self.logger.info(
            f"Set exit_via for {peer_name}: {exit_type!r}"
        )
        return True
def get_peer_stats(self) -> Dict[str, Any]:
"""Get peer registry statistics"""
try:
+105
View File
@@ -0,0 +1,105 @@
"""
Service Store Blueprint — /api/store
Provides routes to browse, install, and remove services from the PIC
service store. Authentication is enforced by the global before_request
hook in app.py (admin session required for all /api/* routes except
/api/auth/*).
"""
import logging
from flask import Blueprint, request, jsonify
import requests as _requests
from service_store_manager import MANIFEST_URL_TPL
logger = logging.getLogger('picell')
store_bp = Blueprint('service_store', __name__, url_prefix='/api/store')
def _ssm():
    """Lazy import of service_store_manager to avoid circular import at module load.

    Returns the app-level ServiceStoreManager singleton.
    """
    from app import service_store_manager
    return service_store_manager
def _cfg():
    """Lazy import of the app-level config_manager (avoids circular import)."""
    from app import config_manager
    return config_manager
@store_bp.route('/services', methods=['GET'])
def list_store_services():
    """GET /api/store/services — available and installed services."""
    try:
        payload = _ssm().list_services()
        return jsonify(payload)
    except Exception as e:
        logger.error(f'list_store_services: {e}')
        return jsonify({'error': str(e)}), 500
@store_bp.route('/services/<service_id>/manifest', methods=['GET'])
def get_manifest(service_id: str):
    """GET /api/store/services/<id>/manifest — fetch a service manifest.

    Proxies the manifest from the store repository. An upstream 404 maps
    to 404 here; any other upstream HTTP error maps to 502. (The previous
    handler labeled every upstream failure — including 5xx — as
    "Manifest not found" with a 404, which misled the UI.)
    """
    try:
        url = MANIFEST_URL_TPL.format(id=service_id)
        resp = _requests.get(url, timeout=10)
        resp.raise_for_status()
        return jsonify(resp.json())
    except _requests.HTTPError as e:
        status = e.response.status_code if e.response is not None else None
        if status == 404:
            return jsonify({'error': f'Manifest not found: {e}'}), 404
        return jsonify({'error': f'Upstream error fetching manifest: {e}'}), 502
    except Exception as e:
        logger.error(f'get_manifest({service_id}): {e}')
        return jsonify({'error': str(e)}), 500
@store_bp.route('/services/<service_id>/install', methods=['POST'])
def install_service(service_id: str):
    """POST /api/store/services/<id>/install — install a store service.

    Returns the manager's result dict: 200 when ``ok``, 400 otherwise.
    """
    try:
        outcome = _ssm().install(service_id)
        status = 200 if outcome.get('ok') else 400
        return jsonify(outcome), status
    except Exception as e:
        logger.error(f'install_service({service_id}): {e}')
        return jsonify({'error': str(e)}), 500
@store_bp.route('/services/<service_id>', methods=['DELETE'])
def remove_service(service_id: str):
    """DELETE /api/store/services/<id> — uninstall a service.

    Pass ``?purge=true`` to also delete the service's named volumes.
    Returns 200 when ``ok``, 404 when the service is not installed.
    """
    try:
        purge = request.args.get('purge') == 'true'
        outcome = _ssm().remove(service_id, purge_data=purge)
        status = 200 if outcome.get('ok') else 404
        return jsonify(outcome), status
    except Exception as e:
        logger.error(f'remove_service({service_id}): {e}')
        return jsonify({'error': str(e)}), 500
@store_bp.route('/installed', methods=['GET'])
def get_installed():
    """GET /api/store/installed — currently installed services."""
    try:
        installed = _cfg().get_installed_services()
        return jsonify({'installed': installed})
    except Exception as e:
        logger.error(f'get_installed: {e}')
        return jsonify({'error': str(e)}), 500
@store_bp.route('/refresh', methods=['POST'])
def refresh_index():
    """POST /api/store/refresh — drop the index cache and re-list services."""
    try:
        manager = _ssm()
        # NOTE(review): pokes the manager's private cache fields directly;
        # a public invalidate() method on ServiceStoreManager would be cleaner.
        manager._index_cache = None
        manager._index_cache_time = 0
        return jsonify(manager.list_services())
    except Exception as e:
        logger.error(f'refresh_index: {e}')
        return jsonify({'error': str(e)}), 500
+65
View File
@@ -0,0 +1,65 @@
import logging
from flask import Blueprint, request, jsonify
logger = logging.getLogger('picell')
setup_bp = Blueprint('setup', __name__, url_prefix='/api/setup')
def _get_setup_manager():
    """Lazily import the app-level SetupManager (avoids circular import)."""
    from app import setup_manager
    return setup_manager
@setup_bp.route('/status', methods=['GET'])
def get_setup_status():
    """GET /api/setup/status — first-run wizard status and options.

    Returns 410 Gone once setup has been completed, so the wizard UI
    knows to stop polling.
    """
    manager = _get_setup_manager()
    if manager.is_setup_complete():
        return jsonify({'error': 'Setup already complete'}), 410
    return jsonify(manager.get_setup_status())
@setup_bp.route('/validate', methods=['POST'])
def validate_setup_step():
    """POST /api/setup/validate — validate one wizard step.

    Body: ``{'step': '<step_name>', 'data': {...}}``; supported steps are
    ``cell_name`` and ``password``. Returns 410 once setup is complete,
    400 for an unknown step, otherwise ``{'valid': ..., 'errors': [...]}``.
    """
    manager = _get_setup_manager()
    if manager.is_setup_complete():
        return jsonify({'error': 'Setup already complete'}), 410
    body = request.get_json(silent=True) or {}
    step = body.get('step', '')
    data = body.get('data', {})
    # Dispatch table instead of an if/elif chain; add new steps here.
    validators = {
        'cell_name': lambda d: manager.validate_cell_name(d.get('cell_name', '')),
        'password': lambda d: manager.validate_password(d.get('password', '')),
    }
    validate = validators.get(step)
    if validate is None:
        return jsonify({'valid': False, 'errors': [f"Unknown step: {step!r}"]}), 400
    errors = validate(data)
    return jsonify({'valid': len(errors) == 0, 'errors': errors})
@setup_bp.route('/complete', methods=['POST'])
def complete_setup():
    """POST /api/setup/complete — finish the wizard, create the admin account.

    Returns the manager's result dict with 200 on success, 400 on
    failure, and 410 once setup has already been completed.
    """
    manager = _get_setup_manager()
    if manager.is_setup_complete():
        return jsonify({'error': 'Setup already complete'}), 410
    payload = request.get_json(silent=True) or {}
    result = manager.complete_setup(payload)
    # TODO (Phase 3): if result.get('success') and domain_mode == 'pic_ngo':
    #     from app import ddns_manager
    #     name = payload.get('cell_name', '')
    #     ip = payload.get('public_ip', '')
    #     ddns_manager.register(name, ip)
    return jsonify(result), (200 if result.get('success') else 400)
+526
View File
@@ -0,0 +1,526 @@
#!/usr/bin/env python3
"""
Service Store Manager for Personal Internet Cell.
Manages installation, removal, and lifecycle of third-party services from the
PIC service store index. Each installed service runs as a Docker container
declared in a compose override file and has:
- An allocated IP in the service pool (172.20.0.20254 by default)
- Optional iptables FORWARD rules declared in its manifest
- Optional Caddy reverse-proxy route declared in its manifest
"""
import logging
import os
import re
import threading
import subprocess
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
import requests
import yaml
from base_service_manager import BaseServiceManager
from ip_utils import CONTAINER_OFFSETS
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Offsets (relative to the cell subnet's network address) from which
# store-service IPs are allocated; see ServiceStoreManager._allocate_service_ip().
SERVICE_POOL_START = 20
SERVICE_POOL_END = 254
# Default store index location; override with the PIC_STORE_INDEX_URL env var.
INDEX_URL_DEFAULT = (
    'https://git.pic.ngo/roof/pic-services/raw/branch/main/index.json'
)
# Per-service manifest URL template; {id} is the service id from the index.
MANIFEST_URL_TPL = (
    'https://git.pic.ngo/roof/pic-services/raw/branch/main/services/{id}/manifest.json'
)
# Only first-party images hosted on the PIC registry may be installed.
IMAGE_ALLOWLIST_RE = re.compile(
    r'^git\.pic\.ngo/roof/[a-z0-9._/-]+(:[a-zA-Z0-9._-]+)?$'
)
# Exact host paths a manifest volume must never mount.
FORBIDDEN_MOUNTS = frozenset([
    '/', '/etc', '/var', '/proc', '/sys', '/dev', '/app', '/run', '/boot',
])
# Subdomains reserved for built-in cell services; manifests may not claim them.
RESERVED_SUBDOMAINS = frozenset([
    'api', 'webui', 'admin', 'www', 'mail', 'ns1', 'ns2',
    'git', 'registry', 'install',
])
# Conservative character allowlist for manifest-declared env values.
ENV_VALUE_RE = re.compile(r'^[A-Za-z0-9._@:/+\-= ]*$')
# ---------------------------------------------------------------------------
# ServiceStoreManager
# ---------------------------------------------------------------------------
class ServiceStoreManager(BaseServiceManager):
"""Manages service store: install, remove, and list available/installed services."""
def __init__(self, config_manager, caddy_manager, container_manager,
             data_dir: str = '', config_dir: str = ''):
    """Wire up collaborating managers and runtime configuration.

    Args:
        config_manager: persists install records and identity fields.
        caddy_manager: regenerates the Caddyfile when routes change.
        container_manager: container lifecycle helper.
        data_dir: forwarded to BaseServiceManager.
        config_dir: forwarded to BaseServiceManager.
    """
    super().__init__('service_store', data_dir, config_dir)
    self.config_manager = config_manager
    self.caddy_manager = caddy_manager
    self.container_manager = container_manager
    # Compose override path and index URL are env-overridable (useful
    # for tests and non-default deployments).
    self.compose_override = os.environ.get(
        'COMPOSE_SERVICES_PATH', '/app/docker-compose.services.yml'
    )
    self.index_url = os.environ.get('PIC_STORE_INDEX_URL', INDEX_URL_DEFAULT)
    # Serializes install()/remove() so they never interleave.
    self._lock = threading.Lock()
    # Service-index cache (see fetch_index()).
    self._index_cache: Optional[list] = None
    self._index_cache_time: float = 0
    self._cache_ttl: int = 300  # 5 min
# ── BaseServiceManager required ───────────────────────────────────────
def get_status(self) -> Dict[str, Any]:
    """Report store-manager health plus the number of installed services."""
    count = len(self.config_manager.get_installed_services())
    return {
        'service': self.service_name,
        'running': True,
        'installed_count': count,
    }
def test_connectivity(self) -> Dict[str, Any]:
    """Probe the store index URL; success means an HTTP 200 within 5 s."""
    try:
        response = requests.get(self.index_url, timeout=5)
    except Exception as e:
        return {'success': False, 'error': str(e)}
    return {'success': response.status_code == 200}
# ── Manifest validation ───────────────────────────────────────────────
@staticmethod
def _validate_manifest(m: dict) -> Tuple[bool, List[str]]:
    """Validate a service manifest against store policy.

    Checks required fields, the first-party image allowlist, volume
    mount safety, iptables rule shape, Caddy subdomain rules, and env
    value characters. Returns ``(ok, errors)`` where ``errors`` lists
    every violation found (validation does not stop at the first).

    Fixes over the previous version: the under-/home/roof/pic error
    message had its prefix relation inverted, and a non-dict
    ``caddy_route`` silently passed validation only to fail later
    during install/Caddyfile regeneration.
    """
    errors: List[str] = []
    # Required top-level fields.
    for field in ('id', 'name', 'version', 'author', 'image', 'container_name'):
        if not m.get(field):
            errors.append(f'Missing required field: {field}')
    # Image allowlist: only first-party images from the PIC registry.
    image = m.get('image', '')
    if image and not IMAGE_ALLOWLIST_RE.match(image):
        errors.append(
            f'image must match git.pic.ngo/roof/* pattern, got: {image}'
        )
    # Volume mount safety: no sensitive roots, nothing under the PIC tree.
    for vol in m.get('volumes', []):
        mount = vol.get('mount', '')
        if mount in FORBIDDEN_MOUNTS:
            errors.append(f'Forbidden volume mount: {mount}')
        elif mount.startswith('/home/roof/pic'):
            errors.append(f'Volume mount must not be under /home/roof/pic: {mount}')
    # iptables rules: ACCEPT-only, templated dest, sane port/proto.
    for rule in m.get('iptables_rules', []):
        if rule.get('type') != 'ACCEPT':
            errors.append(
                f'iptables_rules[].type must be ACCEPT, got: {rule.get("type")}'
            )
        if rule.get('dest_ip') != '${SERVICE_IP}':
            errors.append(
                f'iptables_rules[].dest_ip must be exactly ${{SERVICE_IP}}, '
                f'got: {rule.get("dest_ip")}'
            )
        port = rule.get('dest_port')
        if not isinstance(port, int) or not (1 <= port <= 65535):
            errors.append(
                f'iptables_rules[].dest_port must be an integer 1-65535, got: {port}'
            )
        proto = rule.get('proto', 'tcp')
        if proto not in ('tcp', 'udp'):
            errors.append(
                f'iptables_rules[].proto must be tcp or udp, got: {proto}'
            )
    # Caddy route: must be a mapping; subdomain must be valid and unreserved.
    caddy_route = m.get('caddy_route') or {}
    if not isinstance(caddy_route, dict):
        errors.append(
            f'caddy_route must be an object, got: {type(caddy_route).__name__}'
        )
        subdomain = ''
    else:
        subdomain = caddy_route.get('subdomain', '')
    if subdomain:
        if subdomain in RESERVED_SUBDOMAINS:
            errors.append(f'caddy_route.subdomain is reserved: {subdomain}')
        elif not re.match(r'^[a-z][a-z0-9-]{0,30}$', subdomain):
            errors.append(
                f'caddy_route.subdomain must match ^[a-z][a-z0-9-]{{0,30}}$, '
                f'got: {subdomain}'
            )
    # Env values: restrict to a conservative character set.
    for env_entry in m.get('env', []):
        val = str(env_entry.get('value', ''))
        if not ENV_VALUE_RE.match(val):
            errors.append(
                f'env[].value contains disallowed characters: {val!r}'
            )
    return (len(errors) == 0, errors)
# ── IP allocation ─────────────────────────────────────────────────────
def _allocate_service_ip(self, service_id: str) -> str:
    """Pick the next free service-pool IP (offsets 20-254) in the cell subnet.

    Skips offsets reserved for named infrastructure containers and IPs
    already handed to installed services. Raises RuntimeError when the
    pool is exhausted. (``service_id`` is currently unused but kept for
    interface stability.)
    """
    import ipaddress
    identity = self.config_manager.get_identity()
    network = ipaddress.IPv4Network(
        identity.get('ip_range', '172.20.0.0/16'), strict=False
    )
    base = int(network.network_address)
    # Offsets pinned to named infrastructure containers must never be reused.
    reserved_offsets = set(CONTAINER_OFFSETS.values())
    # IPs already allocated to installed services.
    taken_ips = set(identity.get('service_ips', {}).values())
    for offset in range(SERVICE_POOL_START, SERVICE_POOL_END + 1):
        if offset in reserved_offsets:
            continue
        candidate = str(ipaddress.IPv4Address(base + offset))
        if candidate not in taken_ips:
            return candidate
    raise RuntimeError('Service IP pool exhausted (offsets 20-254 all taken)')
# ── Compose override ──────────────────────────────────────────────────
def _render_compose_override(self, installed_records: dict) -> str:
    """Render the docker-compose override YAML for all installed services.

    Each record becomes a service entry pinned to its allocated IP on the
    external ``cell-network``, with an unless-stopped restart policy,
    bounded json-file logging, and any manifest-declared volumes and
    environment. Named volumes are collected into a top-level
    ``volumes:`` section (default Docker driver).
    """
    services: Dict[str, Any] = {}
    named_volumes: Dict[str, Any] = {}
    for svc_id, record in installed_records.items():
        manifest = record.get('manifest', {})
        container_name = record.get('container_name', svc_id)
        # Bind-style volume strings, plus collection of named volumes.
        mounts = []
        for vol in manifest.get('volumes', []):
            vol_name = vol.get('name', '')
            mount = vol.get('mount', '')
            if vol_name:
                named_volumes[vol_name] = None  # default Docker driver
            if vol_name and mount:
                mounts.append(f'{vol_name}:{mount}')
        # Environment map; entries without a key are dropped.
        env_map = {
            entry.get('key', ''): str(entry.get('value', ''))
            for entry in manifest.get('env', [])
            if entry.get('key', '')
        }
        definition: Dict[str, Any] = {
            'image': manifest.get('image', record.get('image', '')),
            'container_name': container_name,
            'restart': 'unless-stopped',
            'logging': {
                'driver': 'json-file',
                'options': {
                    'max-size': '10m',
                    'max-file': '5',
                },
            },
            'networks': {
                'cell-network': {
                    'ipv4_address': record.get('service_ip', ''),
                }
            },
        }
        if mounts:
            definition['volumes'] = mounts
        if env_map:
            definition['environment'] = env_map
        services[container_name] = definition
    doc: Dict[str, Any] = {
        'version': '3.8',
        'services': services,
        'networks': {
            'cell-network': {
                'external': True,
            }
        },
    }
    if named_volumes:
        doc['volumes'] = named_volumes
    return yaml.dump(doc, default_flow_style=False, allow_unicode=True)
def _write_compose_override(self, content: str) -> None:
"""Atomic write of the compose override file."""
tmp_path = self.compose_override + '.tmp'
try:
os.makedirs(os.path.dirname(os.path.abspath(self.compose_override)),
exist_ok=True)
except (PermissionError, OSError):
pass
with open(tmp_path, 'w') as f:
f.write(content)
f.flush()
try:
os.fsync(f.fileno())
except OSError:
pass
os.replace(tmp_path, self.compose_override)
# ── Index / manifest fetching ─────────────────────────────────────────
def fetch_index(self) -> list:
    """Return the service index, caching successful fetches for the TTL.

    On fetch failure the previous cache (possibly stale) is returned, or
    an empty list if nothing was ever fetched.
    """
    import time
    now = time.time()
    cache_is_fresh = (
        self._index_cache is not None
        and (now - self._index_cache_time) < self._cache_ttl
    )
    if cache_is_fresh:
        return self._index_cache
    try:
        resp = requests.get(self.index_url, timeout=10)
        resp.raise_for_status()
        payload = resp.json()
        # The index may be a bare list or wrapped as {'services': [...]}.
        self._index_cache = (
            payload if isinstance(payload, list) else payload.get('services', [])
        )
        self._index_cache_time = now
        return self._index_cache
    except Exception as e:
        logger.warning(f'fetch_index failed: {e}')
        return self._index_cache or []
def _fetch_manifest(self, service_id: str) -> dict:
    """Fetch and decode the manifest for ``service_id`` from the store.

    Raises requests.HTTPError on a non-2xx upstream response.
    """
    response = requests.get(MANIFEST_URL_TPL.format(id=service_id), timeout=10)
    response.raise_for_status()
    return response.json()
# ── Core operations ───────────────────────────────────────────────────
def install(self, service_id: str) -> dict:
    """Install a service from the store.

    Pipeline (under ``self._lock`` so concurrent installs cannot race):
      1. No-op success if the service is already installed.
      2. Fetch and validate the manifest (failure -> error dict).
      3. Allocate a service-pool IP (pool exhausted -> error dict).
      4. Persist the install record and identity IP mapping.
      5. Regenerate compose override, firewall rules, and Caddyfile —
         each best-effort: failures are logged and install continues.
      6. ``docker compose up -d`` the new container (best-effort).

    Returns ``{'ok': True, 'service_ip': ..., 'container_name': ...}`` on
    success, ``{'ok': True, 'already_installed': True}`` for repeats, or
    ``{'ok': False, 'error'/'errors': ...}`` on fetch/validation failure.
    """
    # Local import to avoid a circular import at module load time.
    from firewall_manager import apply_service_rules
    with self._lock:
        # Already installed?  Idempotent success.
        installed = self.config_manager.get_installed_services()
        if service_id in installed:
            return {'ok': True, 'already_installed': True}
        # Fetch and validate manifest
        try:
            manifest = self._fetch_manifest(service_id)
        except Exception as e:
            return {'ok': False, 'error': f'Failed to fetch manifest: {e}'}
        ok, errs = self._validate_manifest(manifest)
        if not ok:
            return {'ok': False, 'errors': errs}
        # Allocate IP (raises RuntimeError when the pool is exhausted).
        try:
            ip = self._allocate_service_ip(service_id)
        except RuntimeError as e:
            return {'ok': False, 'error': str(e)}
        # Build install record; the full manifest is embedded so later
        # compose/Caddy regeneration never needs to re-fetch it.
        record = {
            'id': service_id,
            'name': manifest.get('name', service_id),
            'container_name': manifest['container_name'],
            'image': manifest.get('image', ''),
            'service_ip': ip,
            'caddy_route': manifest.get('caddy_route'),
            'iptables_rules': manifest.get('iptables_rules', []),
            'manifest': manifest,
            'installed_at': datetime.utcnow().isoformat(),
        }
        # Persist to config
        self.config_manager.set_installed_service(service_id, record)
        identity = self.config_manager.get_identity()
        service_ips = dict(identity.get('service_ips', {}))
        service_ips[service_id] = ip
        self.config_manager.set_identity_field('service_ips', service_ips)
        # Write compose override
        all_installed = self.config_manager.get_installed_services()
        try:
            content = self._render_compose_override(all_installed)
            self._write_compose_override(content)
        except Exception as e:
            logger.error(f'Failed to write compose override: {e}')
        # Apply iptables rules (best-effort)
        try:
            apply_service_rules(service_id, ip, manifest.get('iptables_rules', []))
        except Exception as e:
            logger.warning(f'apply_service_rules for {service_id} failed (non-fatal): {e}')
        # Regenerate Caddyfile with every installed service's route.
        try:
            caddy_routes = [
                r.get('caddy_route')
                for r in all_installed.values()
                if r.get('caddy_route')
            ]
            self.caddy_manager.regenerate_with_installed(caddy_routes)
        except Exception as e:
            logger.warning(f'caddy regenerate for {service_id} failed (non-fatal): {e}')
        # Start the container via docker compose
        base_compose = os.environ.get('COMPOSE_FILE', '/app/docker-compose.yml')
        try:
            result = subprocess.run(
                ['docker', 'compose',
                 '-f', base_compose,
                 '-f', self.compose_override,
                 'up', '-d', manifest['container_name']],
                capture_output=True, text=True, timeout=120,
            )
            if result.returncode != 0:
                logger.warning(
                    f'docker compose up for {service_id} failed: {result.stderr.strip()}'
                )
        except Exception as e:
            logger.warning(f'docker compose up for {service_id} failed (non-fatal): {e}')
        return {
            'ok': True,
            'service_ip': ip,
            'container_name': manifest['container_name'],
        }
def remove(self, service_id: str, purge_data: bool = False) -> dict:
    """Remove an installed service.

    Best-effort teardown: stop and force-remove the container, clear its
    iptables rules, drop the install record from config, then regenerate the
    compose override and Caddyfile from whatever remains installed. Docker,
    iptables, and Caddy failures are logged at warning level and swallowed so
    a half-broken container cannot block uninstall; only a failed compose
    override rewrite logs at error level.

    Args:
        service_id: Key of the installed-service record.
        purge_data: When True, also delete the named Docker volumes listed in
            the service manifest.

    Returns:
        ``{'ok': True}`` on success, or ``{'ok': False, 'error': ...}`` when
        the service is not installed.
    """
    # Imported here rather than at module level — presumably to avoid an
    # import cycle with firewall_manager; TODO confirm (install() does the same).
    from firewall_manager import clear_service_rules
    with self._lock:
        installed = self.config_manager.get_installed_services()
        record = installed.get(service_id)
        if not record:
            return {'ok': False, 'error': f'Service {service_id} is not installed'}
        container_name = record.get('container_name', service_id)
        manifest = record.get('manifest', {})
        base_compose = os.environ.get('COMPOSE_FILE', '/app/docker-compose.yml')
        # Stop and remove container
        try:
            subprocess.run(
                ['docker', 'compose',
                 '-f', base_compose,
                 '-f', self.compose_override,
                 'stop', container_name],
                capture_output=True, text=True, timeout=60,
            )
        except Exception as e:
            logger.warning(f'docker compose stop for {service_id} failed (non-fatal): {e}')
        # Force-remove in case `compose stop` left the container behind.
        try:
            subprocess.run(
                ['docker', 'rm', '-f', container_name],
                capture_output=True, text=True, timeout=30,
            )
        except Exception as e:
            logger.warning(f'docker rm for {service_id} failed (non-fatal): {e}')
        # Clear iptables rules
        try:
            clear_service_rules(service_id)
        except Exception as e:
            logger.warning(f'clear_service_rules for {service_id} failed (non-fatal): {e}')
        # Remove from config, regenerate compose + caddy
        self.config_manager.remove_installed_service(service_id)
        remaining = self.config_manager.get_installed_services()
        try:
            content = self._render_compose_override(remaining)
            self._write_compose_override(content)
        except Exception as e:
            logger.error(f'Failed to write compose override after remove: {e}')
        try:
            # Rebuild Caddy routes only from services that declare one.
            caddy_routes = [
                r.get('caddy_route')
                for r in remaining.values()
                if r.get('caddy_route')
            ]
            self.caddy_manager.regenerate_with_installed(caddy_routes)
        except Exception as e:
            logger.warning(f'caddy regenerate after remove failed (non-fatal): {e}')
        # Purge named volumes if requested
        if purge_data:
            for vol in manifest.get('volumes', []):
                vol_name = vol.get('name', '')
                if vol_name:
                    try:
                        subprocess.run(
                            ['docker', 'volume', 'rm', vol_name],
                            capture_output=True, text=True, timeout=30,
                        )
                    except Exception as e:
                        logger.warning(
                            f'docker volume rm {vol_name} failed (non-fatal): {e}'
                        )
        return {'ok': True}
def list_services(self) -> dict:
    """Return the catalog of available services plus the installed records.

    ``available`` comes from the remote/store index; ``installed`` from the
    persisted config. Keys are service ids in both mappings.
    """
    return {
        'available': self.fetch_index(),
        'installed': self.config_manager.get_installed_services(),
    }
def reapply_on_startup(self) -> None:
    """Re-apply firewall and Caddy rules for all installed services on startup.

    iptables rules, the compose override, and the Caddyfile live outside the
    config store, so they can be lost across reboots or container restarts;
    this rebuilds them from the persisted install records. Every step is
    best-effort (logged and swallowed) — startup must not abort because one
    service misbehaves.
    """
    # Imported here rather than at module level — presumably to avoid an
    # import cycle with firewall_manager; TODO confirm.
    from firewall_manager import apply_service_rules
    installed = self.config_manager.get_installed_services()
    if not installed:
        return
    # Regenerate compose override in case it was deleted
    try:
        content = self._render_compose_override(installed)
        self._write_compose_override(content)
    except Exception as e:
        logger.warning(f'reapply_on_startup: compose override write failed: {e}')
    # Re-apply iptables rules
    for svc_id, record in installed.items():
        ip = record.get('service_ip', '')
        rules = record.get('iptables_rules', [])
        try:
            apply_service_rules(svc_id, ip, rules)
        except Exception as e:
            logger.warning(f'reapply_on_startup: apply_service_rules({svc_id}) failed: {e}')
    # Regenerate Caddyfile
    try:
        caddy_routes = [
            r.get('caddy_route')
            for r in installed.values()
            if r.get('caddy_route')
        ]
        self.caddy_manager.regenerate_with_installed(caddy_routes)
    except Exception as e:
        logger.warning(f'reapply_on_startup: caddy regenerate failed: {e}')
+206
View File
@@ -0,0 +1,206 @@
#!/usr/bin/env python3
"""
SetupManager first-run wizard backend for PIC.
Handles validation, locking, and atomic completion of the initial setup
wizard. Called by api/routes/setup.py.
"""
import fcntl
import logging
import os
import re
from typing import Any, Dict, List
logger = logging.getLogger(__name__)
# Top 30 representative IANA time zones shown in the wizard.
# This is a UI shortlist, not an exhaustive tz database; complete_setup()
# does not restrict `timezone` to this list.
AVAILABLE_TIMEZONES = [
    'UTC',
    # Americas
    'America/New_York',
    'America/Chicago',
    'America/Denver',
    'America/Los_Angeles',
    'America/Anchorage',
    'America/Honolulu',
    'America/Sao_Paulo',
    'America/Argentina/Buenos_Aires',
    'America/Toronto',
    'America/Vancouver',
    'America/Mexico_City',
    # Europe
    'Europe/London',
    'Europe/Paris',
    'Europe/Berlin',
    'Europe/Madrid',
    'Europe/Rome',
    'Europe/Amsterdam',
    'Europe/Moscow',
    'Europe/Istanbul',
    # Africa / Middle East / Asia
    'Africa/Cairo',
    'Africa/Johannesburg',
    'Asia/Dubai',
    'Asia/Kolkata',
    'Asia/Bangkok',
    'Asia/Shanghai',
    'Asia/Tokyo',
    'Asia/Seoul',
    # Oceania
    'Australia/Sydney',
    'Pacific/Auckland',
]
# Service toggles offered by the wizard.
AVAILABLE_SERVICES = [
    'email',
    'calendar',
    'files',
    'wireguard',
]

# Domain-provisioning strategies the wizard may select.
VALID_DOMAIN_MODES = {'pic_ngo', 'cloudflare', 'duckdns', 'http01', 'lan'}

# A lowercase letter followed by 1-30 of [a-z0-9-]: 2-31 characters total.
# A leading hyphen is impossible by construction; a trailing hyphen passes the
# regex and is rejected by an explicit check so the user gets a specific message.
CELL_NAME_RE = re.compile(r'^[a-z][a-z0-9-]{1,30}$')


class SetupManager:
    """Manages the first-run setup wizard state and completion.

    Validation helpers return lists of human-readable error strings (an empty
    list means valid). ``complete_setup`` serializes completion behind an
    exclusive file lock so two concurrent wizard submissions cannot both
    create the admin user.
    """

    def __init__(self, config_manager, auth_manager):
        self.config_manager = config_manager  # identity/config persistence
        self.auth_manager = auth_manager      # user-creation backend

    # ── state helpers ─────────────────────────────────────────────────────

    def is_setup_complete(self) -> bool:
        """Return True if setup has already been completed."""
        return bool(self.config_manager.get_identity().get('setup_complete', False))

    def get_setup_status(self) -> Dict[str, Any]:
        """Return current setup status and wizard metadata (service/tz lists)."""
        return {
            'complete': self.is_setup_complete(),
            'available_services': AVAILABLE_SERVICES,
            'available_timezones': AVAILABLE_TIMEZONES,
        }

    # ── validation ────────────────────────────────────────────────────────

    def validate_cell_name(self, name: str) -> List[str]:
        """Validate a proposed cell name. Returns a list of error strings."""
        errors: List[str] = []
        if not name:
            errors.append('Cell name is required.')
            return errors
        if not CELL_NAME_RE.match(name):
            # Message corrected to match CELL_NAME_RE: one leading lowercase
            # letter + 1-30 more chars = 2-31 characters total (was "231").
            errors.append(
                'Cell name must start with a lowercase letter, be 2-31 characters, '
                'and contain only lowercase letters, digits, and hyphens.'
            )
        if name.startswith('-') or name.endswith('-'):
            errors.append('Cell name must not start or end with a hyphen.')
        return errors

    def validate_password(self, password: str) -> List[str]:
        """Validate admin password strength. Returns a list of error strings."""
        errors: List[str] = []
        if not password:
            errors.append('Password is required.')
            return errors
        if len(password) < 12:
            errors.append('Password must be at least 12 characters long.')
        if not re.search(r'[A-Z]', password):
            errors.append('Password must contain at least one uppercase letter.')
        if not re.search(r'[a-z]', password):
            errors.append('Password must contain at least one lowercase letter.')
        if not re.search(r'\d', password):
            errors.append('Password must contain at least one digit.')
        return errors

    # ── main completion ───────────────────────────────────────────────────

    def complete_setup(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Run all validation, then atomically complete the setup wizard.

        Returns ``{'success': True, 'redirect': '/login'}`` on success or
        ``{'success': False, 'errors': [...]}`` on any failure.
        """
        errors: List[str] = []

        # ── validate inputs ────────────────────────────────────────────────
        cell_name = payload.get('cell_name', '')
        password = payload.get('password', '')
        domain_mode = payload.get('domain_mode', '')
        timezone = payload.get('timezone', '')
        services_enabled = payload.get('services_enabled', [])
        ddns_provider = payload.get('ddns_provider', 'none')

        errors.extend(self.validate_cell_name(cell_name))
        errors.extend(self.validate_password(password))
        if domain_mode not in VALID_DOMAIN_MODES:
            errors.append(
                f"domain_mode must be one of: {', '.join(sorted(VALID_DOMAIN_MODES))}."
            )
        if not timezone or not isinstance(timezone, str):
            errors.append('timezone is required.')
        if not isinstance(services_enabled, list):
            errors.append('services_enabled must be a list.')
        if errors:
            return {'success': False, 'errors': errors}

        # ── acquire file lock to prevent double-completion ─────────────────
        lock_path = os.path.join(
            os.environ.get('DATA_DIR', '/app/data'), 'api', '.setup.lock'
        )
        try:
            os.makedirs(os.path.dirname(lock_path), exist_ok=True)
        except OSError:
            # Best-effort; a real failure surfaces via open() below.
            pass
        lock_fd = None
        try:
            lock_fd = open(lock_path, 'w')
            fcntl.flock(lock_fd, fcntl.LOCK_EX)
        except OSError as exc:
            logger.error(f'Could not acquire setup lock: {exc}')
            # Leak fix: close the fd when open() succeeded but flock() failed.
            if lock_fd is not None:
                try:
                    lock_fd.close()
                except Exception:
                    pass
            return {'success': False, 'errors': ['Setup lock could not be acquired. Try again.']}
        try:
            # Re-check inside the lock — another request may have completed
            # setup between our validation and lock acquisition.
            if self.is_setup_complete():
                return {'success': False, 'errors': ['Setup has already been completed.']}

            # ── create admin user ──────────────────────────────────────────
            ok = self.auth_manager.create_user(
                username='admin',
                password=password,
                role='admin',
            )
            if not ok:
                return {'success': False, 'errors': ['Failed to create admin user. The username may already exist.']}

            # ── persist identity fields ────────────────────────────────────
            self.config_manager.set_identity_field('cell_name', cell_name)
            self.config_manager.set_identity_field('domain_mode', domain_mode)
            self.config_manager.set_identity_field('timezone', timezone)
            self.config_manager.set_identity_field('services_enabled', services_enabled)
            self.config_manager.set_identity_field('ddns_provider', ddns_provider)

            # NOTE: DDNS registration is deferred to Phase 3.
            # For now we just store ddns_provider in config.
            logger.info(
                'DDNS registration skipped (Phase 1). '
                'DDNS registration will happen in Phase 3. '
                f'ddns_provider={ddns_provider!r} stored in identity config.'
            )

            # ── mark setup complete (must be last) ─────────────────────────
            self.config_manager.set_identity_field('setup_complete', True)
            logger.info(f"Setup completed. cell_name={cell_name!r}, domain_mode={domain_mode!r}")
            return {'success': True, 'redirect': '/login'}
        finally:
            try:
                fcntl.flock(lock_fd, fcntl.LOCK_UN)
                lock_fd.close()
            except Exception:
                pass
+32 -15
View File
@@ -152,20 +152,24 @@ class WireGuardManager(BaseServiceManager):
cfg_port = self._get_configured_port() if os.path.exists(self._config_file()) else port
dns_ip, caddy_ip = self._get_dnat_container_ips()
dnat_up = (
f'iptables -t nat -A PREROUTING -i %i -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53; '
f'iptables -t nat -A PREROUTING -i %i -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53; '
f'iptables -t nat -A PREROUTING -i %i -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80; '
f'iptables -t nat -A PREROUTING -i %i -d {server_ip} -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53; '
f'iptables -t nat -A PREROUTING -i %i -d {server_ip} -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53; '
f'iptables -t nat -A PREROUTING -i %i -d {server_ip} -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80; '
f'iptables -I FORWARD -i %i -o eth0 -p tcp --dport 80 -j ACCEPT; '
f'iptables -I FORWARD -i %i -o eth0 -p udp --dport 53 -j ACCEPT; '
f'iptables -I FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT'
f'iptables -I FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT; '
f'iptables -I FORWARD -i eth0 -o %i -s 172.20.0.0/16 -j ACCEPT; '
f'iptables -t nat -A POSTROUTING -o %i -s 172.20.0.0/16 -j MASQUERADE'
)
dnat_down = (
f'iptables -t nat -D PREROUTING -i %i -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true; '
f'iptables -t nat -D PREROUTING -i %i -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true; '
f'iptables -t nat -D PREROUTING -i %i -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80 2>/dev/null || true; '
f'iptables -t nat -D PREROUTING -i %i -d {server_ip} -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true; '
f'iptables -t nat -D PREROUTING -i %i -d {server_ip} -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true; '
f'iptables -t nat -D PREROUTING -i %i -d {server_ip} -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80 2>/dev/null || true; '
f'iptables -D FORWARD -i %i -o eth0 -p tcp --dport 80 -j ACCEPT 2>/dev/null || true; '
f'iptables -D FORWARD -i %i -o eth0 -p udp --dport 53 -j ACCEPT 2>/dev/null || true; '
f'iptables -D FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT 2>/dev/null || true'
f'iptables -D FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT 2>/dev/null || true; '
f'iptables -D FORWARD -i eth0 -o %i -s 172.20.0.0/16 -j ACCEPT 2>/dev/null || true; '
f'iptables -t nat -D POSTROUTING -o %i -s 172.20.0.0/16 -j MASQUERADE 2>/dev/null || true'
)
return (
f'[Interface]\n'
@@ -190,12 +194,18 @@ class WireGuardManager(BaseServiceManager):
t = token.strip()
if not t.startswith('iptables'):
return False
# PREROUTING DNAT on ports 53 or 80
# PREROUTING DNAT on ports 53 or 80 (scoped or unscoped — we replace both)
if 'PREROUTING' in t and 'DNAT' in t and ('--dport 53' in t or '--dport 80' in t):
return True
# FORWARD accept to eth0 for ports 53 or 80 (service traffic forwarding)
if 'FORWARD' in t and '-o eth0' in t and ('--dport 53' in t or '--dport 80' in t):
return True
# Docker-to-WG FORWARD: eth0 → wg0 for 172.20.0.0/16
if 'FORWARD' in t and '-i eth0' in t and '172.20.0.0/16' in t:
return True
# Docker-to-WG MASQUERADE: POSTROUTING wg0 egress for 172.20.0.0/16
if 'POSTROUTING' in t and 'MASQUERADE' in t and '172.20.0.0/16' in t:
return True
return False
def ensure_postup_dnat(self) -> bool:
@@ -213,23 +223,30 @@ class WireGuardManager(BaseServiceManager):
with open(cf) as f:
content = f.read()
import ipaddress as _ipaddress
address = self._get_configured_address()
server_ip = str(_ipaddress.ip_interface(address).ip)
dns_ip, caddy_ip = self._get_dnat_container_ips()
dnat_up = (
f'iptables -t nat -A PREROUTING -i %i -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53'
f'; iptables -t nat -A PREROUTING -i %i -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53'
f'; iptables -t nat -A PREROUTING -i %i -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80'
f'iptables -t nat -A PREROUTING -i %i -d {server_ip} -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53'
f'; iptables -t nat -A PREROUTING -i %i -d {server_ip} -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53'
f'; iptables -t nat -A PREROUTING -i %i -d {server_ip} -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80'
f'; iptables -I FORWARD -i %i -o eth0 -p tcp --dport 80 -j ACCEPT'
f'; iptables -I FORWARD -i %i -o eth0 -p udp --dport 53 -j ACCEPT'
f'; iptables -I FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT'
f'; iptables -I FORWARD -i eth0 -o %i -s 172.20.0.0/16 -j ACCEPT'
f'; iptables -t nat -A POSTROUTING -o %i -s 172.20.0.0/16 -j MASQUERADE'
)
dnat_down = (
f'iptables -t nat -D PREROUTING -i %i -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true'
f'; iptables -t nat -D PREROUTING -i %i -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true'
f'; iptables -t nat -D PREROUTING -i %i -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80 2>/dev/null || true'
f'iptables -t nat -D PREROUTING -i %i -d {server_ip} -p udp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true'
f'; iptables -t nat -D PREROUTING -i %i -d {server_ip} -p tcp --dport 53 -j DNAT --to-destination {dns_ip}:53 2>/dev/null || true'
f'; iptables -t nat -D PREROUTING -i %i -d {server_ip} -p tcp --dport 80 -j DNAT --to-destination {caddy_ip}:80 2>/dev/null || true'
f'; iptables -D FORWARD -i %i -o eth0 -p tcp --dport 80 -j ACCEPT 2>/dev/null || true'
f'; iptables -D FORWARD -i %i -o eth0 -p udp --dport 53 -j ACCEPT 2>/dev/null || true'
f'; iptables -D FORWARD -i %i -o eth0 -p tcp --dport 53 -j ACCEPT 2>/dev/null || true'
f'; iptables -D FORWARD -i eth0 -o %i -s 172.20.0.0/16 -j ACCEPT 2>/dev/null || true'
f'; iptables -t nat -D POSTROUTING -o %i -s 172.20.0.0/16 -j MASQUERADE 2>/dev/null || true'
)
lines = content.split('\n')
+6
View File
@@ -0,0 +1,6 @@
# Baseline compose override managed by the PIC service store.
# `services: {}` is the empty starting point; installed services are rendered
# into this file by the API's _render_compose_override().
version: '3.3'
services: {}
networks:
  cell-network:
    # Join the already-created core network instead of declaring a new one.
    external: true
    # NOTE(review): a top-level `name:` on a network requires compose file
    # format >= 3.5; Compose v2 ignores `version:` so this works there, but
    # confirm if legacy docker-compose 1.x must parse this file.
    name: pic_cell-network
+14 -1
View File
@@ -3,8 +3,9 @@ version: '3.3'
services:
# Reverse Proxy - Caddy for routing all .cell traffic
caddy:
image: caddy:2-alpine
image: git.pic.ngo/roof/pic-caddy:latest
container_name: cell-caddy
profiles: ["core", "full"]
ports:
- "80:80"
- "443:443"
@@ -28,6 +29,7 @@ services:
dns:
image: coredns/coredns:latest
container_name: cell-dns
profiles: ["core", "full"]
command: ["-conf", "/etc/coredns/Corefile"]
ports:
- "${DNS_PORT:-53}:53/udp"
@@ -49,6 +51,7 @@ services:
dhcp:
image: alpine:latest
container_name: cell-dhcp
profiles: ["full"]
ports:
- "${DHCP_PORT:-67}:67/udp"
volumes:
@@ -71,6 +74,7 @@ services:
ntp:
image: alpine:latest
container_name: cell-ntp
profiles: ["full"]
ports:
- "${NTP_PORT:-123}:123/udp"
volumes:
@@ -92,6 +96,7 @@ services:
mail:
image: mailserver/docker-mailserver:latest
container_name: cell-mail
profiles: ["full"]
hostname: mail
domainname: cell.local
env_file: ./config/mail/mailserver.env
@@ -121,6 +126,7 @@ services:
radicale:
image: tomsquest/docker-radicale:latest
container_name: cell-radicale
profiles: ["full"]
ports:
- "127.0.0.1:${RADICALE_PORT:-5232}:5232"
volumes:
@@ -140,6 +146,7 @@ services:
webdav:
image: bytemark/webdav:latest
container_name: cell-webdav
profiles: ["full"]
ports:
- "127.0.0.1:${WEBDAV_PORT:-8080}:80"
environment:
@@ -162,6 +169,7 @@ services:
wireguard:
image: linuxserver/wireguard:latest
container_name: cell-wireguard
profiles: ["core", "full"]
environment:
- SERVERMODE=true
- PUID=${PUID:-1000}
@@ -193,6 +201,7 @@ services:
api:
build: ./api
container_name: cell-api
profiles: ["core", "full"]
ports:
- "127.0.0.1:${API_PORT:-3000}:3000"
volumes:
@@ -206,6 +215,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock
- ./.env:/app/.env.compose
- ./docker-compose.yml:/app/docker-compose.yml:ro
- ./docker-compose.services.yml:/app/docker-compose.services.yml
- ./scripts:/app/scripts:ro
pid: host
restart: unless-stopped
@@ -225,6 +235,7 @@ services:
webui:
build: ./webui
container_name: cell-webui
profiles: ["core", "full"]
ports:
- "${WEBUI_PORT:-8081}:80"
restart: unless-stopped
@@ -241,6 +252,7 @@ services:
rainloop:
image: hardware/rainloop
container_name: cell-rainloop
profiles: ["full"]
restart: unless-stopped
networks:
cell-network:
@@ -259,6 +271,7 @@ services:
filegator:
image: filegator/filegator
container_name: cell-filegator
profiles: ["full"]
restart: unless-stopped
networks:
cell-network:
Executable
+331
View File
@@ -0,0 +1,331 @@
#!/usr/bin/env bash
# =============================================================================
# Personal Internet Cell (PIC) — Bash Installer
# =============================================================================
#
# SECURITY NOTICE
# ---------------
# You are about to execute this script with elevated privileges.
# ALWAYS review a script before running it:
#
# curl -fsSL https://git.pic.ngo/roof/pic/raw/branch/main/install.sh | less
#
# SHA256 checksum (verify before running):
# PLACEHOLDER — updated when script is published at git.pic.ngo
#
# Verify with:
# sha256sum install.sh
# # or, via curl before piping:
# curl -fsSL https://git.pic.ngo/roof/pic/raw/branch/main/install.sh \
# | sha256sum
#
# =============================================================================
#
# Usage:
# sudo bash install.sh # Standard install
# sudo bash install.sh --force # Bypass idempotency check
# sudo PIC_DIR=/srv/pic bash install.sh # Custom install directory
#
# Supported OS: Debian/Ubuntu (apt), Fedora/RHEL (dnf), Alpine Linux (apk)
#
# =============================================================================
set -euo pipefail

# ---------------------------------------------------------------------------
# Configuration — each value overridable via environment (e.g. PIC_DIR=/srv/pic)
# ---------------------------------------------------------------------------
PIC_DIR="${PIC_DIR:-/opt/pic}"
PIC_REPO="${PIC_REPO:-https://git.pic.ngo/roof/pic.git}"
PIC_USER="${PIC_USER:-pic}"
API_HEALTH_URL="http://127.0.0.1:3000/health"
API_HEALTH_TIMEOUT=60
WEBUI_PORT=8081
FORCE=0

# Parse flags — only --force is recognized; anything else aborts with usage.
for arg in "$@"; do
    case "$arg" in
        --force) FORCE=1 ;;
        *)
            echo "Unknown argument: $arg" >&2
            echo "Usage: $0 [--force]" >&2
            exit 1
            ;;
    esac
done

# ---------------------------------------------------------------------------
# Color output — tput-derived codes on a capable TTY, raw ANSI escapes
# otherwise (printf interprets \033 inside its format string either way).
# ---------------------------------------------------------------------------
if [ -t 1 ] && command -v tput >/dev/null 2>&1 && tput setaf 1 >/dev/null 2>&1; then
    RED="$(tput setaf 1)"
    GREEN="$(tput setaf 2)"
    YELLOW="$(tput setaf 3)"
    BOLD="$(tput bold)"
    RESET="$(tput sgr0)"
else
    RED='\033[0;31m'
    GREEN='\033[0;32m'
    YELLOW='\033[0;33m'
    BOLD='\033[1m'
    RESET='\033[0m'
fi

# Logging helpers. log_step expands $TOTAL_STEPS at call time, so defining
# TOTAL_STEPS below the functions is harmless.
# NOTE(review): log_ok/log_warn print an empty colored cell — a status glyph
# (likely a check mark / warning sign) appears to have been lost in encoding;
# confirm against the published script.
log_step() { printf "\n${BOLD}[%s/%s] %s${RESET}\n" "$1" "$TOTAL_STEPS" "$2"; }
log_ok() { printf " ${GREEN}${RESET} %s\n" "$1"; }
log_warn() { printf " ${YELLOW}${RESET} %s\n" "$1"; }
log_error() { printf "\n${RED}${BOLD}ERROR:${RESET}${RED} %s${RESET}\n" "$1" >&2; }
die() { log_error "$1"; exit 1; }
TOTAL_STEPS=7
# ---------------------------------------------------------------------------
# Must run as root — the installer writes to /opt, manages users, and
# configures Docker system services.
# ---------------------------------------------------------------------------
if [ "$(id -u)" -ne 0 ]; then
    die "This installer must be run as root (use sudo)."
fi

# ---------------------------------------------------------------------------
# Idempotency guard — a completed install leaves ${PIC_DIR}/.installed;
# re-running without --force is a harmless no-op.
# ---------------------------------------------------------------------------
if [ -f "${PIC_DIR}/.installed" ] && [ "$FORCE" -eq 0 ]; then
    printf "${YELLOW}Already installed.${RESET} Run ${BOLD}'make update'${RESET} to update.\n"
    printf "To force a full reinstall, run: ${BOLD}$0 --force${RESET}\n"
    exit 0
fi

# ---------------------------------------------------------------------------
# Step 1 — Detect OS / package manager
# ---------------------------------------------------------------------------
log_step 1 "Detecting operating system..."
PKG_MANAGER=""
OS_ID=""
if [ -f /etc/os-release ]; then
    # shellcheck source=/dev/null
    . /etc/os-release
    OS_ID="${ID:-unknown}"
fi
case "$OS_ID" in
    ubuntu|debian|raspbian)
        PKG_MANAGER="apt"
        ;;
    fedora)
        PKG_MANAGER="dnf"
        ;;
    rhel|centos|almalinux|rocky)
        PKG_MANAGER="dnf"
        ;;
    alpine)
        PKG_MANAGER="apk"
        ;;
    *)
        # Last-resort detection when /etc/os-release is missing or reports an
        # unrecognized ID: probe for the package managers directly.
        if command -v apt-get >/dev/null 2>&1; then
            PKG_MANAGER="apt"
        elif command -v dnf >/dev/null 2>&1; then
            PKG_MANAGER="dnf"
        elif command -v apk >/dev/null 2>&1; then
            PKG_MANAGER="apk"
        else
            die "Unsupported OS '${OS_ID}'. PIC requires Debian/Ubuntu, Fedora/RHEL, or Alpine Linux."
        fi
        ;;
esac
log_ok "Detected OS: ${OS_ID} (package manager: ${PKG_MANAGER})"
# ---------------------------------------------------------------------------
# Step 2 — Install required packages
# ---------------------------------------------------------------------------
log_step 2 "Installing dependencies..."
case "$PKG_MANAGER" in
    apt)
        export DEBIAN_FRONTEND=noninteractive
        apt-get update -qq
        apt-get install -y -qq git curl make docker.io docker-compose-plugin 2>&1 \
            | grep -v "^$" | sed 's/^/ /' || true
        # Verify docker compose plugin installed
        if ! docker compose version >/dev/null 2>&1; then
            log_warn "docker-compose-plugin not available; falling back to standalone docker-compose"
            apt-get install -y -qq docker-compose 2>&1 | grep -v "^$" | sed 's/^/ /' || true
        fi
        ;;
    dnf)
        dnf install -y -q git curl make docker 2>&1 | sed 's/^/ /' || true
        # Enable and start Docker (dnf installs but doesn't enable it)
        systemctl enable --now docker >/dev/null 2>&1 || true
        # Docker Compose plugin comes bundled with the Docker CE package on Fedora/RHEL.
        # If not present, install via the docker-compose-plugin package (Docker CE repo).
        if ! docker compose version >/dev/null 2>&1; then
            log_warn "docker compose plugin not found; installing docker-compose-plugin..."
            dnf install -y -q docker-compose-plugin 2>&1 | sed 's/^/ /' || true
        fi
        ;;
    apk)
        apk add --quiet git curl make docker docker-cli-compose 2>&1 | sed 's/^/ /' || true
        # Enable Docker on Alpine (OpenRC)
        rc-update add docker default >/dev/null 2>&1 || true
        service docker start >/dev/null 2>&1 || true
        ;;
esac

# Final sanity checks — every required tool must now resolve or we abort.
command -v git >/dev/null 2>&1 || die "git could not be installed. Aborting."
command -v curl >/dev/null 2>&1 || die "curl could not be installed. Aborting."
command -v make >/dev/null 2>&1 || die "make could not be installed. Aborting."
command -v docker >/dev/null 2>&1 || die "docker could not be installed. Aborting."
docker compose version >/dev/null 2>&1 || \
    docker-compose version >/dev/null 2>&1 || \
    die "Neither 'docker compose' (plugin) nor 'docker-compose' is available. Aborting."
log_ok "All dependencies satisfied"

# ---------------------------------------------------------------------------
# Step 3 — Create system user
# ---------------------------------------------------------------------------
log_step 3 "Configuring system user..."
if ! id "$PIC_USER" >/dev/null 2>&1; then
    case "$PKG_MANAGER" in
        apk)
            # BusyBox adduser: -S system account, -D no password, -H no home
            adduser -S -D -H -s /sbin/nologin "$PIC_USER"
            ;;
        *)
            useradd --system --no-create-home --shell /usr/sbin/nologin "$PIC_USER"
            ;;
    esac
    log_ok "Created system user: ${PIC_USER}"
else
    log_ok "System user already exists: ${PIC_USER}"
fi

# Ensure docker group exists and user is in it
# NOTE(review): groupadd/usermod come from shadow-utils and may be absent on a
# minimal Alpine (BusyBox provides addgroup/adduser instead) — verify there.
if ! getent group docker >/dev/null 2>&1; then
    groupadd docker
    log_ok "Created docker group"
fi
if ! id -nG "$PIC_USER" | grep -qw docker; then
    usermod -aG docker "$PIC_USER"
    log_ok "Added ${PIC_USER} to docker group"
else
    log_ok "${PIC_USER} is already in docker group"
fi
# ---------------------------------------------------------------------------
# Step 4 — Clone or update repository
# ---------------------------------------------------------------------------
log_step 4 "Setting up repository at ${PIC_DIR}..."
if [ -d "${PIC_DIR}/.git" ]; then
    log_warn "Repository already cloned — running git pull"
    git -C "$PIC_DIR" pull --ff-only 2>&1 | sed 's/^/ /'
    log_ok "Repository updated"
elif [ -d "$PIC_DIR" ] && [ "$(ls -A "$PIC_DIR" 2>/dev/null)" ]; then
    # Non-empty directory that is not a git checkout: refuse to clobber it.
    die "${PIC_DIR} exists and is not empty and is not a git repo. Aborting to avoid data loss."
else
    mkdir -p "$(dirname "$PIC_DIR")"
    git clone "$PIC_REPO" "$PIC_DIR" 2>&1 | sed 's/^/ /'
    log_ok "Repository cloned to ${PIC_DIR}"
fi

# Ensure the pic user owns the directory
# NOTE(review): assumes a group named ${PIC_USER} exists; useradd --system
# creates one on most distros, BusyBox adduser -S may not — confirm on Alpine.
chown -R "${PIC_USER}:${PIC_USER}" "$PIC_DIR"

# ---------------------------------------------------------------------------
# Step 5 — Run make install
# ---------------------------------------------------------------------------
log_step 5 "Running 'make install'..."
# make install generates config, writes the systemd unit, and touches
# .installed. It runs as root in the current shell (the installer itself
# requires root); repository ownership was normalized to ${PIC_USER} above.
cd "$PIC_DIR"
if ! make install 2>&1 | sed 's/^/ /'; then
    die "'make install' failed. Check the output above."
fi
log_ok "'make install' complete"

# ---------------------------------------------------------------------------
# Step 6 — Start core services
# ---------------------------------------------------------------------------
log_step 6 "Starting core services..."
cd "$PIC_DIR"
if ! make start-core 2>&1 | sed 's/^/ /'; then
    die "'make start-core' failed. Check the output above."
fi
log_ok "Core services started"
# ---------------------------------------------------------------------------
# Step 7 — Health check + print wizard URL
# ---------------------------------------------------------------------------
log_step 7 "Waiting for API health check (up to ${API_HEALTH_TIMEOUT}s)..."
ELAPSED=0
HEALTHY=0
# Poll every 2 seconds until the API answers or the timeout budget is spent.
while [ "$ELAPSED" -lt "$API_HEALTH_TIMEOUT" ]; do
    if curl -fsS "$API_HEALTH_URL" >/dev/null 2>&1; then
        HEALTHY=1
        break
    fi
    sleep 2
    ELAPSED=$((ELAPSED + 2))
    printf " Waiting... (%ds)\r" "$ELAPSED"
done
printf "\n"
if [ "$HEALTHY" -ne 1 ]; then
    # Deliberately non-fatal: the stack may simply still be booting.
    log_warn "API did not respond within ${API_HEALTH_TIMEOUT}s at ${API_HEALTH_URL}"
    log_warn "The stack may still be starting up. Check with: make -C ${PIC_DIR} status"
    log_warn "Or follow logs with: make -C ${PIC_DIR} logs"
else
    log_ok "API is healthy"
fi
# Detect the host's primary outbound IPv4 address.
# `ip route get` output varies ("1.1.1.1 via GW dev IF src IP ..." vs
# "1.1.1.1 dev IF src IP ..." on directly-attached routes), so scan for the
# token following "src" instead of assuming it is always field 7.
HOST_IP="$(ip route get 1.1.1.1 2>/dev/null \
    | awk '{for (i = 1; i < NF; i++) if ($i == "src") {print $(i + 1); exit}}' || true)"
if [ -z "$HOST_IP" ]; then
    # Fallback: first address reported by hostname -I (first non-loopback
    # IPv4 on most distros; unavailable on some minimal systems).
    HOST_IP="$(hostname -I 2>/dev/null | awk '{print $1}' || true)"
fi
# Last resort: a placeholder the user must substitute by hand.
HOST_IP="${HOST_IP:-<host-ip>}"
# ---------------------------------------------------------------------------
# Done — print the wizard URL and a command cheat-sheet.
# NOTE(review): variables are expanded inside printf *format* strings below;
# a PIC_DIR containing '%' would be misinterpreted. Fine for the default
# /opt/pic, but consider 'printf "...%s..." "$PIC_DIR"' for arbitrary paths.
# ---------------------------------------------------------------------------
printf "\n${GREEN}${BOLD}============================================================${RESET}\n"
printf "${GREEN}${BOLD} PIC installed successfully!${RESET}\n"
printf "${GREEN}${BOLD}============================================================${RESET}\n"
printf "\n"
printf " Open the setup wizard at:\n"
printf "\n"
printf " ${BOLD}http://${HOST_IP}:${WEBUI_PORT}/setup${RESET}\n"
printf "\n"
printf " Useful commands:\n"
printf " make -C ${PIC_DIR} status — check service status\n"
printf " make -C ${PIC_DIR} logs — follow all service logs\n"
printf " make -C ${PIC_DIR} start — start all services\n"
printf " make -C ${PIC_DIR} stop — stop all services\n"
printf " make -C ${PIC_DIR} update — pull latest code and restart\n"
printf "\n"
+15
View File
@@ -0,0 +1,15 @@
# Personal Internet Cell — systemd unit that drives the stack via the Makefile.
[Unit]
Description=Personal Internet Cell
# Start only after the Docker daemon; stop/fail if docker.service is absent.
After=docker.service
Requires=docker.service

[Service]
# oneshot + RemainAfterExit: `make start` launches detached containers and
# exits; the unit then stays "active" until ExecStop runs at shutdown.
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/pic
ExecStart=/usr/bin/make start
ExecStop=/usr/bin/make stop
# Allow up to 2 minutes for image pulls / container startup.
TimeoutStartSec=120

[Install]
WantedBy=multi-user.target
+288 -101
View File
@@ -1,56 +1,115 @@
"""
E2E test: cross-cell routing for a split-tunnel VPN peer.
Creates a temporary WireGuard peer on cell2 (pic1 / test), brings up a real
WireGuard tunnel from the test-runner host, and verifies that cell1 (pic0 / dev)
is reachable end-to-end via the cell-to-cell link.
Creates a temporary WireGuard peer on cell2 (the first connected cell), brings up
a real WireGuard tunnel from the test-runner host, and verifies that cell1 (the
local cell) is reachable end-to-end via the cell-to-cell link.
Why this test is meaningful
---------------------------
10.0.0.1 is cell1's WireGuard server IP, reachable ONLY inside cell1's
cell-wireguard Docker container. It is NOT reachable directly from the
test-runner host (verified: 100% packet loss without VPN).
Cell1's WireGuard server IP is reachable ONLY inside cell1's cell-wireguard Docker
container. It is NOT reachable directly from the test-runner host. If a ping to
that IP succeeds, the full path was taken:
If a ping to 10.0.0.1 succeeds during the test, the full path was taken:
[test-runner wg-e2e] 192.168.31.52:51821 [pic1 cell-wireguard FORWARD]
[cell-to-cell WG tunnel] [pic0 cell-wireguard] 10.0.0.1
[test-runner wg-e2e] cell2 WireGuard [cell-to-cell tunnel] cell1 WG IP
Prerequisites
-------------
* SSH access to 192.168.31.52 (pic1) as 'roof' with no passphrase
* `wg-quick` and `sudo` available on the test runner (pic0)
* Both cells must have an active cell-to-cell WireGuard handshake
* /home/roof/pic/data/api/cell_links.json must have at least one connected cell
* /home/roof/pic/config/wireguard/wg_confs/wg0.conf must exist
* SSH access to cell2's LAN IP as 'roof' with no passphrase
* `wg-quick`, `dig`, and `sudo` available on the test runner
Skip conditions are checked at fixture time; no manual flag needed.
All configuration is read dynamically from config files no hardcoded IPs or ports.
Skip conditions are checked at module level; no manual flag needed.
"""
import ipaddress
import json
import os
import re
import subprocess
import secrets
import time
import pytest
# -------------------------------------------------------------------------
# Constants
# Dynamic configuration loading
# -------------------------------------------------------------------------
PIC1_LAN = '192.168.31.52' # test cell (cell2)
PIC1_WG_PORT = 51821 # WireGuard ListenPort on pic1
PIC1_WG_PUBKEY = 'ITl3+KfcNjsDq9ztE+1TC10rmeqaLmpGgTXEEk07BiE='
_CELL_LINKS_FILE = '/home/roof/pic/data/api/cell_links.json'
_WG_CONF_FILE = '/home/roof/pic/config/wireguard/wg_confs/wg0.conf'
_CELL_CONFIG_FILE = '/home/roof/pic/config/api/cell_config.json'
PIC1_WG_SERVER_IP = '10.0.2.1' # cell2's WireGuard server IP
PIC0_WG_SERVER_IP = '10.0.0.1' # cell1's WireGuard server IP (cross-cell target)
TEST_PEER_IP = '10.0.2.250' # unused IP in cell2's VPN subnet
TEST_PEER_CIDR = f'{TEST_PEER_IP}/32'
IFACE_NAME = 'pic-e2e-c2c'
def _load_cfg() -> dict:
"""Load all test parameters from local config files. Returns {} on any error."""
cfg = {}
# AllowedIPs for the test peer: cell2's local subnet + cell1's subnet (cross-cell)
SPLIT_TUNNEL_ALLOWED_IPS = '10.0.2.0/24, 10.0.0.0/24'
# --- cell1 (local/our) identity ---
try:
with open(_CELL_CONFIG_FILE) as f:
identity = json.load(f).get('_identity', {})
cfg['cell1_domain'] = identity.get('domain', '')
cfg['cell1_wg_port'] = int(identity.get('wireguard_port', 51820))
except Exception:
pass
# --- cell1 WG server IP from wg0.conf [Interface] Address ---
try:
with open(_WG_CONF_FILE) as f:
in_iface = False
for line in f:
line = line.strip()
if line == '[Interface]':
in_iface = True
elif line.startswith('[') and line.endswith(']'):
in_iface = False
elif in_iface and line.startswith('Address') and '=' in line:
addr = line.split('=', 1)[1].strip()
net = ipaddress.ip_interface(addr)
cfg['cell1_wg_ip'] = str(net.ip)
cfg['cell1_vpn_subnet'] = str(net.network)
break
except Exception:
pass
# --- cell2 (connected peer) from cell_links.json (first entry) ---
try:
with open(_CELL_LINKS_FILE) as f:
links = json.load(f)
if links:
link = links[0]
endpoint = link.get('endpoint', '')
if endpoint:
host, _, port = endpoint.rpartition(':')
cfg['cell2_lan_ip'] = host
cfg['cell2_wg_port'] = int(port)
cfg['cell2_pubkey'] = link.get('public_key', '')
cfg['cell2_wg_ip'] = link.get('dns_ip', '')
cfg['cell2_vpn_subnet'] = link.get('vpn_subnet', '')
cfg['cell2_domain'] = link.get('domain', '')
except Exception:
pass
# --- Derive TEST_PEER_IP: a high-range host in cell2's VPN subnet ---
# Use .250 (e.g., 10.0.2.250 for 10.0.2.0/24)
try:
net = ipaddress.ip_network(cfg['cell2_vpn_subnet'], strict=False)
cfg['test_peer_ip'] = str(net.network_address + 250)
except Exception:
pass
return cfg
_CFG = _load_cfg()
IFACE_NAME = 'pic-e2e-c2c'
IPTABLES_COMMENT = 'pic-e2e-c2c-test'
# Maximum acceptable average RTT for cells on the same LAN
MAX_LATENCY_MS = 10.0
pytestmark = pytest.mark.wg
@@ -63,19 +122,18 @@ def _run(cmd, **kw):
def _ssh(cmd, timeout=15):
    """Run a command on cell2 via SSH and return the CompletedProcess.

    BatchMode makes a missing/locked key fail fast instead of prompting;
    the target host is cell2's LAN IP discovered from cell_links.json.
    """
    lan_ip = _CFG.get('cell2_lan_ip', '')
    return _run(
        ['ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'BatchMode=yes',
         '-o', 'ConnectTimeout=5', f'roof@{lan_ip}', cmd],
        timeout=timeout,
    )
def _pic1_wg(args, timeout=10):
    """Run a command inside pic1's cell-wireguard container via SSH."""
    # NOTE(review): looks superseded by _pic2_wg below (pic1 -> cell2 rename);
    # confirm whether this helper and its _cleanup_pic1_* callers are still used.
    cmd = 'docker exec cell-wireguard ' + args
    r = _ssh(cmd, timeout=timeout)
    return r
def _pic2_wg(args, timeout=10):
    """Run a command inside cell2's cell-wireguard container via SSH."""
    container_cmd = 'docker exec cell-wireguard ' + args
    return _ssh(container_cmd, timeout=timeout)
def _ping(ip, count=3, wait=2):
@@ -87,40 +145,43 @@ def _cleanup_iface():
_run(['sudo', 'ip', 'link', 'delete', IFACE_NAME], timeout=5)
def _cleanup_pic1_peer(pubkey):
    # Best-effort: remove the temporary test peer from pic1's wg0.
    # NOTE(review): appears to be stale residue of the pic1 -> cell2 rename
    # (teardown uses _cleanup_pic2_peer) — confirm before relying on it.
    _pic1_wg(f'wg set wg0 peer {pubkey} remove')
def _cleanup_pic2_peer(pubkey):
    # Best-effort removal of the temporary test peer from cell2's wg0.
    removal_cmd = f'wg set wg0 peer {pubkey} remove'
    _pic2_wg(removal_cmd)
def _cleanup_pic1_iptables():
    # Best-effort: delete the test peer's FORWARD ACCEPT rule on pic1.
    # NOTE(review): depends on module-level TEST_PEER_IP; presumably stale
    # after the pic1 -> cell2 rename (_cleanup_pic2_iptables takes peer_ip
    # as a parameter instead) — confirm.
    _pic1_wg(f'iptables -D FORWARD -s {TEST_PEER_IP} -j ACCEPT '
             f'-m comment --comment {IPTABLES_COMMENT}')
def _cleanup_pic2_iptables(peer_ip):
    # Best-effort: drop the test peer's FORWARD ACCEPT rule on cell2.
    rule = (
        f'iptables -D FORWARD -s {peer_ip} -j ACCEPT '
        f'-m comment --comment {IPTABLES_COMMENT}'
    )
    _pic2_wg(rule)
# -------------------------------------------------------------------------
# Session-level skip check
# Skip checks
# -------------------------------------------------------------------------
def _check_prerequisites():
    """Return a skip reason string, or None if all prereqs are met.

    Checks, in order: required config keys, wg-quick, dig, passwordless
    sudo, and SSH reachability of cell2. Runs once at import time (see
    _SKIP_REASON below), so it must be cheap and side-effect free.
    """
    # Config keys every test in this module depends on.
    required_keys = ('cell1_wg_ip', 'cell2_lan_ip', 'cell2_pubkey',
                     'cell2_wg_ip', 'test_peer_ip', 'cell2_vpn_subnet',
                     'cell1_vpn_subnet')
    missing = [k for k in required_keys if not _CFG.get(k)]
    if missing:
        return f'Config incomplete (missing: {", ".join(missing)}). ' \
               f'Ensure cell_links.json and wg0.conf exist and are populated.'
    # Required tooling on the test runner.
    if _run(['which', 'wg-quick']).returncode != 0:
        return 'wg-quick not found on test runner'
    if _run(['which', 'dig']).returncode != 0:
        return 'dig not found on test runner'
    # 'sudo -n' fails instead of prompting when no cached credential exists.
    if _run(['sudo', '-n', 'true']).returncode != 0:
        return 'passwordless sudo not available on test runner'
    # SSH round-trip to cell2.
    r = _ssh('echo ok', timeout=6)
    if r.returncode != 0 or 'ok' not in r.stdout:
        lan = _CFG.get('cell2_lan_ip', '?')
        return f'SSH to {lan} failed: {r.stderr.strip() or r.stdout.strip()}'
    return None
# -------------------------------------------------------------------------
# Module-level skip
# -------------------------------------------------------------------------
_SKIP_REASON = _check_prerequisites()
@@ -131,20 +192,23 @@ _SKIP_REASON = _check_prerequisites()
@pytest.fixture(scope='module')
def wg_setup(tmp_path_factory):
"""
Module-scoped fixture: adds test peer to pic1, brings up wg interface on
pic0 host, yields, then tears everything down.
Yields a dict:
{
'peer_ip': '10.0.2.250',
'allowed_ips': '10.0.2.0/24, 10.0.0.0/24',
'privkey': '<wg private key>',
'pubkey': '<wg public key>',
}
Module-scoped fixture: adds test peer to cell2, brings up wg interface on
cell1 (test runner), yields config dict, then tears everything down.
"""
if _SKIP_REASON:
pytest.skip(_SKIP_REASON)
cell2_lan_ip = _CFG['cell2_lan_ip']
cell2_wg_port = _CFG['cell2_wg_port']
cell2_pubkey = _CFG['cell2_pubkey']
cell2_vpn_subnet = _CFG['cell2_vpn_subnet']
cell1_vpn_subnet = _CFG['cell1_vpn_subnet']
test_peer_ip = _CFG['test_peer_ip']
test_peer_cidr = f'{test_peer_ip}/32'
# AllowedIPs: cell2's subnet + cell1's subnet (split-tunnel cross-cell)
allowed_ips = f'{cell2_vpn_subnet}, {cell1_vpn_subnet}'
tmp_path = tmp_path_factory.mktemp('wg_e2e_c2c')
# --- Generate a WireGuard key pair ---
@@ -157,28 +221,32 @@ def wg_setup(tmp_path_factory):
assert pub_r.returncode == 0, f'wg pubkey failed: {pub_r.stderr}'
pubkey = pub_r.stdout.strip()
# --- Add peer to pic1's wg0 (live, no restart needed) ---
r = _pic1_wg(f'wg set wg0 peer {pubkey} allowed-ips {TEST_PEER_CIDR} persistent-keepalive 25')
assert r.returncode == 0, f'wg set peer failed on pic1: {r.stderr}'
# --- Add peer to cell2's wg0 (live, no restart needed) ---
r = _pic2_wg(f'wg set wg0 peer {pubkey} allowed-ips {test_peer_cidr} persistent-keepalive 25')
assert r.returncode == 0, f'wg set peer failed on cell2: {r.stderr}'
# --- Add permissive iptables rule so test traffic passes FORWARD ---
r = _pic1_wg(
f'iptables -I FORWARD 1 -s {TEST_PEER_IP} -j ACCEPT '
# --- Add permissive iptables ACCEPT so test traffic passes cell2's FORWARD ---
r = _pic2_wg(
f'iptables -I FORWARD 1 -s {test_peer_ip} -j ACCEPT '
f'-m comment --comment {IPTABLES_COMMENT}'
)
assert r.returncode == 0, f'iptables -I FORWARD failed on pic1: {r.stderr}'
assert r.returncode == 0, f'iptables -I FORWARD failed on cell2: {r.stderr}'
# --- Write wg-quick config on the test runner ---
conf_path = str(tmp_path / f'{IFACE_NAME}.conf')
# Table=off: let wg-quick create the interface without managing routes.
# We add routes manually below so that existing host routes (added by
# ensure_cell_subnet_routes) don't conflict with wg-quick's route additions.
conf = (
f'[Interface]\n'
f'PrivateKey = {privkey}\n'
f'Address = {TEST_PEER_IP}/32\n'
f'Address = {test_peer_ip}/32\n'
f'Table = off\n'
f'\n'
f'[Peer]\n'
f'PublicKey = {PIC1_WG_PUBKEY}\n'
f'Endpoint = {PIC1_LAN}:{PIC1_WG_PORT}\n'
f'AllowedIPs = {SPLIT_TUNNEL_ALLOWED_IPS}\n'
f'PublicKey = {cell2_pubkey}\n'
f'Endpoint = {cell2_lan_ip}:{cell2_wg_port}\n'
f'AllowedIPs = {allowed_ips}\n'
f'PersistentKeepalive = 25\n'
)
with open(conf_path, 'w') as f:
@@ -189,15 +257,21 @@ def wg_setup(tmp_path_factory):
up_r = _run(['sudo', 'wg-quick', 'up', conf_path], timeout=15)
assert up_r.returncode == 0, f'wg-quick up failed: {up_r.stderr}\n{up_r.stdout}'
# Give WireGuard a moment to establish the handshake
# --- Add routes manually (replace is idempotent, handles pre-existing routes) ---
for subnet in allowed_ips.split(','):
_run(['sudo', 'ip', 'route', 'replace', subnet.strip(), 'dev', IFACE_NAME], timeout=5)
time.sleep(3)
yield {
'peer_ip': TEST_PEER_IP,
'allowed_ips': SPLIT_TUNNEL_ALLOWED_IPS,
'privkey': privkey,
'pubkey': pubkey,
'conf_path': conf_path,
'test_peer_ip': test_peer_ip,
'allowed_ips': allowed_ips,
'privkey': privkey,
'pubkey': pubkey,
'conf_path': conf_path,
'cell1_wg_ip': _CFG['cell1_wg_ip'],
'cell2_wg_ip': _CFG['cell2_wg_ip'],
'cell1_domain': _CFG.get('cell1_domain', ''),
}
# --- Teardown ---
@@ -206,8 +280,8 @@ def wg_setup(tmp_path_factory):
os.unlink(conf_path)
except Exception:
pass
_cleanup_pic1_iptables()
_cleanup_pic1_peer(pubkey)
_cleanup_pic2_iptables(test_peer_ip)
_cleanup_pic2_peer(pubkey)
# -------------------------------------------------------------------------
@@ -219,24 +293,25 @@ class TestCellToCellRouting:
Full end-to-end: split-tunnel peer on cell2 reaches cell1 via cell-to-cell tunnel.
"""
def test_prerequisites_cell1_not_reachable_directly(self):
    """Confirm cell1's WG IP is NOT reachable from host without VPN (test validity check)."""
    # Fall back to 10.0.0.1 so this validity check still runs on partial config.
    cell1_wg_ip = _CFG.get('cell1_wg_ip', '10.0.0.1')
    assert not _ping(cell1_wg_ip, count=1, wait=1), (
        f'{cell1_wg_ip} is reachable WITHOUT the VPN — test would be a false positive. '
        f'The test is only meaningful when this IP is unreachable without the tunnel.'
    )
def test_cell2_wg_ip_reachable(self, wg_setup):
    """Cell2's WireGuard server IP is reachable (basic tunnel sanity)."""
    cell2_wg_ip = wg_setup['cell2_wg_ip']
    assert _ping(cell2_wg_ip), (
        f'Cell2 WG server IP {cell2_wg_ip} not reachable. '
        f'Handshake may not have established. '
        f'Peer allowed-ips: {wg_setup["allowed_ips"]}'
    )
def test_handshake_established(self, wg_setup):
"""A WireGuard handshake with pic1 has completed (within 30 s)."""
"""A WireGuard handshake with cell2 has completed (within 30 s)."""
deadline = time.time() + 30
while time.time() < deadline:
r = _run(['sudo', 'wg', 'show', IFACE_NAME], timeout=5)
@@ -244,34 +319,59 @@ class TestCellToCellRouting:
return
time.sleep(2)
pytest.fail(
f'No WireGuard handshake with pic1 after 30 s.\n'
f'No WireGuard handshake with cell2 after 30 s.\n'
f'wg show output:\n{r.stdout}'
)
def test_cross_cell_wg_ip_reachable(self, wg_setup):
    """
    Cell1's WireGuard IP is reachable from a peer connected to cell2.

    This is the critical cross-cell routing test. The full path is:
        test-runner wg-e2e -> cell2 FORWARD -> cell-to-cell tunnel -> cell1 WG IP
    """
    cell1_wg_ip = wg_setup['cell1_wg_ip']
    assert _ping(cell1_wg_ip, count=3, wait=3), (
        f'Cell1 WG IP {cell1_wg_ip} NOT reachable from split-tunnel peer on cell2. '
        f'\nAllowed IPs: {wg_setup["allowed_ips"]}'
        f'\nThis means the cell-to-cell routing is broken. Check:'
        f'\n 1. cell2 FORWARD chain has ESTABLISHED,RELATED ACCEPT'
        f'\n 2. cell2 wg0.conf has AllowedIPs covering cell1 subnet'
        f'\n 3. Cell-to-cell WireGuard handshake is recent (wg show on cell2)'
    )
def test_cross_cell_ping_latency(self, wg_setup):
    """Cross-cell average ping RTT stays under MAX_LATENCY_MS.

    Both cells sit on the same LAN; a high average RTT means traffic is
    routing via the internet rather than straight over the LAN WireGuard
    tunnel. Check the endpoints recorded in cell_links.json in that case.
    """
    cell1_wg_ip = wg_setup['cell1_wg_ip']
    result = _run(['ping', '-c', '10', '-W', '2', cell1_wg_ip], timeout=30)
    assert result.returncode == 0, (
        f'Ping to {cell1_wg_ip} failed completely: {result.stderr}'
    )
    # Average RTT lives in ping's summary line: min/avg/max/mdev.
    rtt_match = re.search(
        r'rtt min/avg/max/mdev = [\d.]+/([\d.]+)/[\d.]+/[\d.]+ ms',
        result.stdout
    )
    assert rtt_match, f'Could not parse ping RTT from output:\n{result.stdout}'
    avg_rtt = float(rtt_match.group(1))
    assert avg_rtt < MAX_LATENCY_MS, (
        f'Cross-cell avg RTT {avg_rtt:.2f}ms exceeds {MAX_LATENCY_MS}ms. '
        f'Both cells are on the same LAN — high latency means traffic routes '
        f'via the internet. Check cell_links.json uses LAN IPs, not public IPs.'
    )
def test_cross_cell_api_reachable(self, wg_setup):
"""Cell1's API /health is reachable through the cell-to-cell tunnel."""
import urllib.request, urllib.error
url = f'http://{PIC0_WG_SERVER_IP}:3000/health'
cell1_wg_ip = wg_setup['cell1_wg_ip']
url = f'http://{cell1_wg_ip}:3000/health'
try:
with urllib.request.urlopen(url, timeout=8) as resp:
import json
body = json.loads(resp.read())
import json as _json
body = _json.loads(resp.read())
assert body.get('status') == 'healthy', (
f'Cell1 API returned unexpected health: {body}'
)
@@ -285,15 +385,14 @@ class TestCellToCellRouting:
def test_cross_cell_web_reachable(self, wg_setup):
"""Cell1's web service (port 80 via Caddy) is reachable through the tunnel."""
import urllib.request, urllib.error
# Port 80 goes to Caddy → services. We expect any HTTP response (even a redirect).
url = f'http://{PIC0_WG_SERVER_IP}/'
cell1_wg_ip = wg_setup['cell1_wg_ip']
url = f'http://{cell1_wg_ip}/'
try:
with urllib.request.urlopen(url, timeout=8) as resp:
assert resp.status in (200, 301, 302, 307, 308), (
f'Unexpected HTTP status from cell1 Caddy: {resp.status}'
)
except urllib.error.HTTPError as e:
# HTTPError means we got a response — tunnel works even if it's a 4xx/5xx
assert e.code < 500, (
f'Cell1 Caddy returned server error {e.code} — may indicate a Caddy issue'
)
@@ -301,3 +400,91 @@ class TestCellToCellRouting:
pytest.fail(
f'Cell1 web (Caddy) at {url} not reachable via tunnel: {e}'
)
def test_tunnel_latency_consistency(self, wg_setup):
    """WireGuard tunnel shows no significant latency spikes on a local wired LAN.

    Sends 50 pings at 0.2 s intervals to cell2's WG server IP. Passes when
    at most 5% of replies (2 of 50) exceed max(3x median RTT, 15 ms).
    """
    cell2_wg_ip = wg_setup['cell2_wg_ip']
    result = _run(['ping', '-c', '50', '-i', '0.2', '-W', '2', cell2_wg_ip], timeout=25)
    assert result.returncode == 0, f'All pings to {cell2_wg_ip} failed: {result.stderr}'
    # Collect every per-packet RTT sample from ping's output.
    samples = [float(m.group(1)) for m in re.finditer(r'time=([\d.]+) ms', result.stdout)]
    assert len(samples) >= 40, (
        f'Too few ping replies ({len(samples)}/50) — packet loss may mask latency issues'
    )
    median_rtt = sorted(samples)[len(samples) // 2]
    # Spike threshold: 3x median, floored at 15 ms to tolerate tiny medians.
    threshold = max(median_rtt * 3.0, 15.0)
    spikes = [sample for sample in samples if sample > threshold]
    spike_fraction = len(spikes) / len(samples)
    assert spike_fraction <= 0.05, (
        f'Latency spikes: {len(spikes)}/{len(samples)} packets ({spike_fraction:.0%}) '
        f'exceeded {threshold:.1f}ms (3× median {median_rtt:.1f}ms). '
        f'Spike values: {[f"{s:.1f}ms" for s in sorted(spikes)]}'
    )
def test_cross_cell_domain_accessible(self, wg_setup):
    """A service domain from cell1 is resolvable via cell2's DNS and HTTP-reachable.

    DNS chain exercised below:
        test-peer -> cell2_wg_ip:53 (DNAT to cell-dns on cell2)
        cell2 Corefile forwards cell1_domain -> cell1_wg_ip:53
        cell1 cell-dns returns A record -> cell1_wg_ip
    HTTP:
        test-peer -> cell1_wg_ip:80 (Host: calendar.<cell1_domain>)
        -> cell-to-cell tunnel -> cell1 Caddy
    Requires: scoped DNAT (wg0 PREROUTING -d server_ip) on both cells
    and Docker-to-wg0 routing on cell2 (host route + MASQUERADE).
    """
    cell1_domain = wg_setup.get('cell1_domain', '')
    cell2_wg_ip = wg_setup['cell2_wg_ip']
    cell1_wg_ip = wg_setup['cell1_wg_ip']
    if not cell1_domain:
        pytest.skip('cell1_domain not configured — cannot test domain access')
    calendar_host = f'calendar.{cell1_domain}'
    # --- DNS resolution via cell2's DNS ---
    r = _run(
        ['dig', f'@{cell2_wg_ip}', calendar_host, '+short', '+time=5', '+tries=2'],
        timeout=15
    )
    # NOTE(review): some arrow characters appear to have been lost from the
    # failure messages below (e.g. 'Dockerwg0') — confirm against the repo.
    assert r.returncode == 0, (
        f'dig @{cell2_wg_ip} {calendar_host} failed: {r.stderr.strip()}\n'
        f'DNS chain: test-peer {cell2_wg_ip}:53 cell-dns(cell2) '
        f'{cell1_wg_ip}:53 (cell1). '
        f'If this fails, check: (1) DNAT on cell2 scoped to -d {cell2_wg_ip}, '
        f'(2) Dockerwg0 routing on cell2 (host route + MASQUERADE).'
    )
    resolved = r.stdout.strip()
    # cell1's zone should answer with its own WG IP for every service name.
    assert resolved == cell1_wg_ip, (
        f'DNS resolved {calendar_host!r} to {resolved!r}, '
        f'expected {cell1_wg_ip!r}. '
        f'cell1 zone: all {cell1_domain} names should point to {cell1_wg_ip}.'
    )
    # --- HTTP access via domain name (Host header -> Caddy routing) ---
    import urllib.request, urllib.error
    url = f'http://{cell1_wg_ip}/'
    # The Host header steers cell1's Caddy to the calendar vhost.
    req = urllib.request.Request(url, headers={'Host': calendar_host})
    try:
        with urllib.request.urlopen(req, timeout=8) as resp:
            # Any non-5xx response proves DNS + tunnel + Caddy routing work.
            assert resp.status < 500, (
                f'cell1 Caddy returned {resp.status} for Host:{calendar_host}'
            )
    except urllib.error.HTTPError as e:
        # An HTTPError still means a response arrived through the tunnel.
        assert e.code < 500, (
            f'cell1 Caddy server error {e.code} for Host:{calendar_host}'
        )
    except urllib.error.URLError as e:
        pytest.fail(
            f'HTTP to {url} (Host:{calendar_host}) via tunnel failed: {e}'
        )
+228
View File
@@ -0,0 +1,228 @@
"""Tests for CaddyManager — Caddyfile generation per domain mode plus
admin-API reload, health check, and consecutive-failure bookkeeping.
"""
import os
import sys
import unittest
from unittest.mock import MagicMock, patch
import requests
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api'))
from caddy_manager import CaddyManager # noqa: E402
def _mgr(tmpdir=None, identity=None):
    """Build a CaddyManager backed by a mock config_manager."""
    config_stub = MagicMock()
    config_stub.get_identity.return_value = identity or {}
    return CaddyManager(
        config_manager=config_stub,
        data_dir=tmpdir or '/tmp/pic-test-data',
        config_dir=tmpdir or '/tmp/pic-test-config',
    )
# Canned Caddy route snippets, as the service store would register them;
# shared by the service-route tests below.
CALENDAR_ROUTE = (
    "handle /calendar* {\n"
    " reverse_proxy cell-radicale:5232\n"
    "}"
)
FILES_ROUTE = (
    "handle /files* {\n"
    " reverse_proxy cell-filegator:8080\n"
    "}"
)
class TestGenerateCaddyfileLan(unittest.TestCase):
    """LAN mode: internal CA only, no ACME/DNS-01 machinery."""

    def test_lan_mode_has_auto_https_off_and_no_acme(self):
        identity = {'cell_name': 'mycell', 'domain_mode': 'lan'}
        caddyfile = _mgr().generate_caddyfile(identity, [])
        self.assertIn('auto_https off', caddyfile)
        # LAN mode must not reference any ACME or DNS-01 machinery.
        for forbidden in ('acme_ca', 'acme_email', 'dns pic_ngo', 'dns cloudflare'):
            self.assertNotIn(forbidden, caddyfile)
        # Internal-CA TLS pair is used instead.
        self.assertIn('tls /etc/caddy/internal/cert.pem '
                      '/etc/caddy/internal/key.pem', caddyfile)
        # Cell hostname plus virtual IP listener.
        self.assertIn('http://mycell.cell', caddyfile)
        self.assertIn('http://172.20.0.2:80', caddyfile)
class TestGenerateCaddyfilePicNgo(unittest.TestCase):
    """pic_ngo mode: DNS-01 via the pic_ngo plugin with a wildcard cert."""

    def test_pic_ngo_has_dns_plugin_and_wildcard(self):
        identity = {'cell_name': 'alpha', 'domain_mode': 'pic_ngo'}
        caddyfile = _mgr().generate_caddyfile(identity, [])
        expected_fragments = (
            'dns pic_ngo',
            '*.alpha.pic.ngo',
            'alpha.pic.ngo',
            '{$PIC_NGO_DDNS_TOKEN}',
            '{$PIC_NGO_DDNS_API}',
            'email admin@alpha.pic.ngo',
            'acme_ca {$ACME_CA_URL}',  # ACME staging hook
        )
        for fragment in expected_fragments:
            self.assertIn(fragment, caddyfile)
class TestGenerateCaddyfileCloudflare(unittest.TestCase):
    """cloudflare mode: DNS-01 via the cloudflare plugin on a custom domain."""

    def test_cloudflare_has_dns_cloudflare(self):
        identity = {
            'cell_name': 'beta',
            'domain_mode': 'cloudflare',
            'custom_domain': 'example.com',
        }
        caddyfile = _mgr().generate_caddyfile(identity, [])
        for fragment in ('dns cloudflare {$CF_API_TOKEN}',
                         '*.example.com',
                         'email {$ACME_EMAIL}',
                         'acme_ca {$ACME_CA_URL}'):
            self.assertIn(fragment, caddyfile)
class TestGenerateCaddyfileDuckDns(unittest.TestCase):
    """duckdns mode: DNS-01 via the duckdns plugin with a wildcard."""

    def test_duckdns_has_dns_duckdns(self):
        identity = {'cell_name': 'gamma', 'domain_mode': 'duckdns'}
        caddyfile = _mgr().generate_caddyfile(identity, [])
        self.assertIn('dns duckdns {$DUCKDNS_TOKEN}', caddyfile)
        self.assertIn('*.gamma.duckdns.org', caddyfile)
class TestGenerateCaddyfileHttp01(unittest.TestCase):
    """http01 mode: no wildcard/DNS-01; one plain host block per service."""

    def test_http01_no_tls_block_and_per_service_blocks(self):
        identity = {
            'cell_name': 'delta',
            'domain_mode': 'http01',
            'custom_domain': 'delta.noip.me',
        }
        services = [
            {'name': 'calendar', 'caddy_route':
                'reverse_proxy cell-radicale:5232'},
            {'name': 'files', 'caddy_route':
                'reverse_proxy cell-filegator:8080'},
        ]
        caddyfile = _mgr().generate_caddyfile(identity, services)
        # No wildcard, no DNS-01 plugins.
        self.assertNotIn('*.delta', caddyfile)
        self.assertNotIn('dns ', caddyfile)
        # No explicit tls block (no internal CA, no plugin) — the host block
        # itself is left empty so Caddy uses HTTP-01 by default.
        self.assertNotIn('tls {', caddyfile)
        # Per-service host blocks, each with its route.
        for fragment in ('calendar.delta.noip.me {',
                         'files.delta.noip.me {',
                         'reverse_proxy cell-radicale:5232',
                         'reverse_proxy cell-filegator:8080'):
            self.assertIn(fragment, caddyfile)
class TestServiceRoutesIncluded(unittest.TestCase):
    """Installed service routes and the core api/webui routes are all emitted."""

    def test_installed_service_route_appears_in_output(self):
        identity = {'cell_name': 'eps', 'domain_mode': 'lan'}
        services = [
            {'name': 'calendar', 'caddy_route': CALENDAR_ROUTE},
            {'name': 'files', 'caddy_route': FILES_ROUTE},
        ]
        caddyfile = _mgr().generate_caddyfile(identity, services)
        for fragment in ('handle /calendar*',
                         'reverse_proxy cell-radicale:5232',
                         'handle /files*',
                         'reverse_proxy cell-filegator:8080',
                         # Core routes still emitted
                         'reverse_proxy cell-api:3000',
                         'reverse_proxy cell-webui:80'):
            self.assertIn(fragment, caddyfile)
class TestReloadCaddyAdminAPI(unittest.TestCase):
    """reload_caddy() POSTs the current Caddyfile to the admin /load endpoint."""

    def test_reload_calls_admin_api_load_endpoint(self):
        mgr = _mgr()
        # Point at a tmp Caddyfile so we can read it back during reload.
        import tempfile
        tmp = tempfile.NamedTemporaryFile('w', delete=False, suffix='.caddyfile')
        # Remove the file even when an assertion below fails (the original
        # unlinked only on success, leaking the temp file on failure).
        self.addCleanup(os.unlink, tmp.name)
        tmp.write(":80 { reverse_proxy cell-webui:80 }\n")
        tmp.close()
        mgr.caddyfile_path = tmp.name

        with patch('caddy_manager.requests.post') as mock_post:
            mock_post.return_value = MagicMock(status_code=200, text='ok')
            ok = mgr.reload_caddy()

        self.assertTrue(ok)
        mock_post.assert_called_once()
        args, kwargs = mock_post.call_args
        # First positional arg is the URL
        self.assertEqual(args[0], 'http://cell-caddy:2019/load')
        self.assertEqual(kwargs['headers']['Content-Type'], 'text/caddyfile')
        # Body must be the Caddyfile's contents, not a path.
        self.assertIn('cell-webui:80', kwargs['data'])
class TestHealthCheck(unittest.TestCase):
    """check_caddy_health(): True only on a 200 from the admin API."""

    def test_returns_true_on_200(self):
        mgr = _mgr()
        with patch('caddy_manager.requests.get') as mock_get:
            mock_get.return_value = MagicMock(status_code=200)
            self.assertTrue(mgr.check_caddy_health())
            mock_get.assert_called_once()
            # URL must be the admin API root
            self.assertIn('cell-caddy:2019', mock_get.call_args[0][0])

    def test_returns_false_on_connection_error(self):
        # A refused connection is reported as unhealthy, not raised.
        with patch('caddy_manager.requests.get',
                   side_effect=requests.ConnectionError('refused')):
            self.assertFalse(_mgr().check_caddy_health())

    def test_returns_false_on_non_200(self):
        with patch('caddy_manager.requests.get') as mock_get:
            mock_get.return_value = MagicMock(status_code=500)
            self.assertFalse(_mgr().check_caddy_health())
class TestFailureCounter(unittest.TestCase):
    """Consecutive health-failure counter counts up from 0 and resets to 0."""

    def test_increments_and_resets(self):
        mgr = _mgr()
        self.assertEqual(mgr.get_health_failure_count(), 0)
        # Each increment returns the new running total.
        for expected in (1, 2, 3):
            self.assertEqual(mgr.increment_health_failure(), expected)
        self.assertEqual(mgr.get_health_failure_count(), 3)
        mgr.reset_health_failures()
        self.assertEqual(mgr.get_health_failure_count(), 0)
class TestCertStatus(unittest.TestCase):
    """get_cert_status(): surfaces the identity 'tls' block, or a default."""

    def test_returns_default_when_no_tls_in_identity(self):
        mgr = _mgr(identity={'cell_name': 'x', 'domain_mode': 'lan'})
        status = mgr.get_cert_status()
        self.assertEqual(status['status'], 'unknown')
        self.assertIsNone(status['expiry'])
        self.assertIsNone(status['days_remaining'])

    def test_returns_tls_block_when_present(self):
        tls_block = {
            'status': 'valid',
            'expiry': '2026-08-01T00:00:00Z',
            'days_remaining': 84,
        }
        mgr = _mgr(identity={
            'cell_name': 'x',
            'domain_mode': 'pic_ngo',
            'tls': tls_block,
        })
        status = mgr.get_cert_status()
        self.assertEqual(status['status'], 'valid')
        self.assertEqual(status['expiry'], '2026-08-01T00:00:00Z')
        self.assertEqual(status['days_remaining'], 84)
# Allow running this file directly: python tests/test_caddy_manager.py
if __name__ == '__main__':
    unittest.main()
+690
View File
@@ -0,0 +1,690 @@
"""
Tests for ConnectivityManager config validation, file upload, status,
exit listing, peer exit assignment, and route application.
All subprocess calls (docker exec iptables/ip) and filesystem paths are
isolated so these tests run without any live infrastructure.
"""
import os
import sys
import stat
import tempfile
import shutil
import unittest
from unittest.mock import MagicMock, patch, call
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api'))
import connectivity_manager as cm_module
from connectivity_manager import ConnectivityManager
# ---------------------------------------------------------------------------
# Factory helper
# ---------------------------------------------------------------------------
# Sentinel distinguishing "argument omitted" from an explicit None.
_SENTINEL = object()


def _make_manager(tmp_dir=None, peer_registry=_SENTINEL, config_manager=None):
    """Build a ConnectivityManager with mocked dependencies.

    Pass peer_registry=None explicitly to test the no-registry path.
    Omit peer_registry (or pass _SENTINEL) to get a default MagicMock.
    """
    if tmp_dir is None:
        tmp_dir = tempfile.mkdtemp()

    if config_manager is None:
        default_identity = {
            'cell_name': 'test',
            'ip_range': '172.20.0.0/16',
        }
        config_manager = MagicMock()
        config_manager.get_identity.return_value = default_identity

    if peer_registry is _SENTINEL:
        peer_registry = MagicMock()
        peer_registry.list_peers.return_value = []

    # Suppress event-bus subscription so construction needs no live infra.
    with patch.object(ConnectivityManager, '_subscribe_to_events', lambda self: None):
        manager = ConnectivityManager(
            config_manager=config_manager,
            peer_registry=peer_registry,
            data_dir=tmp_dir,
            config_dir=tmp_dir,
        )
    return manager
def _mock_subprocess_ok():
"""Return a MagicMock mimicking a successful subprocess.run result."""
return MagicMock(returncode=0, stdout='', stderr='')
# ---------------------------------------------------------------------------
# _validate_wg_conf
# ---------------------------------------------------------------------------
class TestValidateWgConf(unittest.TestCase):
    """_validate_wg_conf: strips lifecycle hooks, rejects the reserved wg0 name."""

    def setUp(self):
        # Fresh temp dir + manager per test; removed in tearDown.
        self.tmp = tempfile.mkdtemp()
        self.mgr = _make_manager(tmp_dir=self.tmp)

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def test_valid_config_passes_and_returns_cleaned_text(self):
        conf = "[Interface]\nPrivateKey = abc123\nAddress = 10.99.0.1/24\n\n[Peer]\nPublicKey = xyz\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertIn('[Interface]', result)
        self.assertIn('PrivateKey', result)
        self.assertIn('[Peer]', result)

    def test_postupdate_is_stripped_silently(self):
        # Hooks can run arbitrary shell as root — must be dropped, not rejected.
        conf = "[Interface]\nPrivateKey = abc\nPostUp = iptables -A FORWARD -j ACCEPT\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertNotIn('PostUp', result)
        self.assertIn('PrivateKey', result)

    def test_postdown_is_stripped_silently(self):
        conf = "[Interface]\nPrivateKey = abc\nPostDown = iptables -D FORWARD -j ACCEPT\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertNotIn('PostDown', result)

    def test_preup_is_stripped_silently(self):
        conf = "[Interface]\nPrivateKey = abc\nPreUp = /sbin/modprobe wireguard\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertNotIn('PreUp', result)

    def test_predown_is_stripped_silently(self):
        conf = "[Interface]\nPrivateKey = abc\nPreDown = /sbin/rmmod wireguard\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertNotIn('PreDown', result)

    def test_interface_wg0_raises_value_error(self):
        # wg0 is the cell's own interface; uploaded configs must not claim it.
        conf = "[Interface]\nName = wg0\nPrivateKey = abc\n"
        with self.assertRaises(ValueError) as ctx:
            self.mgr._validate_wg_conf(conf)
        self.assertIn('wg0', str(ctx.exception))

    def test_interface_wg0_via_interface_key_raises_value_error(self):
        # 'Interface = wg0' (not just 'Name = wg0') should also be caught
        conf = "[Interface]\nInterface = wg0\nPrivateKey = abc\n"
        with self.assertRaises(ValueError):
            self.mgr._validate_wg_conf(conf)

    def test_interface_wg_ext0_passes(self):
        # Non-reserved interface names are allowed through unchanged.
        conf = "[Interface]\nName = wg_ext0\nPrivateKey = abc\nAddress = 10.99.0.1/24\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertIn('wg_ext0', result)

    def test_non_string_input_raises_value_error(self):
        with self.assertRaises(ValueError):
            self.mgr._validate_wg_conf(None)

    def test_result_ends_with_newline(self):
        conf = "[Interface]\nPrivateKey = abc\n"
        result = self.mgr._validate_wg_conf(conf)
        self.assertTrue(result.endswith('\n'))

    def test_multiple_hooks_all_stripped(self):
        conf = (
            "[Interface]\n"
            "PrivateKey = abc\n"
            "PostUp = cmd1\n"
            "PostDown = cmd2\n"
            "PreUp = cmd3\n"
            "PreDown = cmd4\n"
        )
        result = self.mgr._validate_wg_conf(conf)
        for hook in ('PostUp', 'PostDown', 'PreUp', 'PreDown'):
            self.assertNotIn(hook, result)
        self.assertIn('PrivateKey', result)
# ---------------------------------------------------------------------------
# _validate_ovpn
# ---------------------------------------------------------------------------
class TestValidateOvpn(unittest.TestCase):
    """_validate_ovpn: strips script/plugin directives, keeps safe ones."""

    def setUp(self):
        # Fresh temp dir + manager per test; removed in tearDown.
        self.tmp = tempfile.mkdtemp()
        self.mgr = _make_manager(tmp_dir=self.tmp)

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _base_conf(self, extra=''):
        # Minimal valid client config; `extra` appends directives under test.
        return f"client\ndev tun\nproto udp\nremote vpn.example.com 1194\n{extra}"

    def test_valid_ovpn_passes(self):
        conf = self._base_conf()
        result = self.mgr._validate_ovpn(conf)
        self.assertIn('proto udp', result)
        self.assertIn('remote vpn.example.com 1194', result)

    def test_up_script_is_stripped(self):
        # 'up'/'down' scripts execute arbitrary commands — must be removed.
        conf = self._base_conf('up /sbin/connect.sh\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('up /sbin/connect.sh', result)

    def test_down_script_is_stripped(self):
        conf = self._base_conf('down /sbin/disconnect.sh\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('down /sbin/disconnect.sh', result)

    def test_script_security_is_stripped(self):
        conf = self._base_conf('script-security 2\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('script-security', result)

    def test_plugin_is_stripped(self):
        conf = self._base_conf('plugin /path/to/plugin.so\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('plugin', result)

    def test_route_up_is_stripped(self):
        conf = self._base_conf('route-up /sbin/route_cmd\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('route-up', result)

    def test_route_pre_down_is_stripped(self):
        conf = self._base_conf('route-pre-down /sbin/cleanup\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertNotIn('route-pre-down', result)

    def test_proto_udp_is_preserved(self):
        conf = self._base_conf()
        result = self.mgr._validate_ovpn(conf)
        self.assertIn('proto udp', result)

    def test_remote_directive_is_preserved(self):
        conf = self._base_conf()
        result = self.mgr._validate_ovpn(conf)
        self.assertIn('remote vpn.example.com 1194', result)

    def test_comments_are_preserved(self):
        conf = self._base_conf('# this is a comment\n')
        result = self.mgr._validate_ovpn(conf)
        self.assertIn('# this is a comment', result)

    def test_non_string_input_raises_value_error(self):
        with self.assertRaises(ValueError):
            self.mgr._validate_ovpn(42)

    def test_result_ends_with_newline(self):
        conf = self._base_conf()
        result = self.mgr._validate_ovpn(conf)
        self.assertTrue(result.endswith('\n'))

    def test_all_forbidden_directives_stripped_together(self):
        conf = self._base_conf(
            'up /s\ndown /s\nscript-security 2\nplugin /p\nroute-up /r\nroute-pre-down /r\n'
        )
        result = self.mgr._validate_ovpn(conf)
        for directive in ('up ', 'down ', 'script-security', 'plugin', 'route-up', 'route-pre-down'):
            self.assertNotIn(directive, result)
        # Safe directives survive
        self.assertIn('proto udp', result)
# ---------------------------------------------------------------------------
# upload_wireguard_ext
# ---------------------------------------------------------------------------
class TestUploadWireguardExt(unittest.TestCase):
    """upload_wireguard_ext(): sanitized storage of an external WireGuard conf."""

    def setUp(self):
        # Fresh temp dir per test; the manager keeps all state under it.
        self.tmp = tempfile.mkdtemp()
        self.mgr = _make_manager(tmp_dir=self.tmp)

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _valid_conf(self):
        # Minimal well-formed config: one [Interface] and one [Peer] section.
        return "[Interface]\nPrivateKey = abc\nAddress = 10.99.0.1/24\n\n[Peer]\nPublicKey = xyz\n"

    def _stored_path(self):
        # Where the manager is expected to persist the uploaded config.
        return os.path.join(self.tmp, 'connectivity', 'wireguard_ext', 'wg_ext0.conf')

    def test_valid_conf_returns_ok_true(self):
        outcome = self.mgr.upload_wireguard_ext(self._valid_conf())
        self.assertTrue(outcome['ok'])

    def test_valid_conf_writes_file_to_correct_path(self):
        self.mgr.upload_wireguard_ext(self._valid_conf())
        expected = self._stored_path()
        self.assertTrue(os.path.isfile(expected), f'Expected file at {expected}')

    def test_valid_conf_file_has_mode_0600(self):
        # Config holds a private key; must be stored owner-read/write only.
        self.mgr.upload_wireguard_ext(self._valid_conf())
        mode = stat.S_IMODE(os.stat(self._stored_path()).st_mode)
        self.assertEqual(mode, 0o600, f'Expected 0600, got {oct(mode)}')

    def test_wg0_interface_returns_ok_false_with_error(self):
        # Naming the interface wg0 would clash with the cell's own tunnel;
        # the upload must be rejected with an explanatory error.
        bad_conf = "[Interface]\nName = wg0\nPrivateKey = abc\n"
        outcome = self.mgr.upload_wireguard_ext(bad_conf)
        self.assertFalse(outcome['ok'])
        self.assertIn('error', outcome)
        self.assertIn('wg0', outcome['error'])

    def test_file_content_has_hooks_stripped(self):
        # PostUp/PostDown hooks execute shell commands — stripped on store.
        conf = "[Interface]\nPrivateKey = abc\nPostUp = evil\n"
        self.mgr.upload_wireguard_ext(conf)
        with open(self._stored_path()) as fh:
            content = fh.read()
        self.assertNotIn('PostUp', content)
        self.assertIn('PrivateKey', content)
# ---------------------------------------------------------------------------
# upload_openvpn
# ---------------------------------------------------------------------------
class TestUploadOpenvpn(unittest.TestCase):
    """upload_openvpn(): name validation, hook stripping, 0600 permissions."""

    def setUp(self):
        # Fresh temp dir per test; the manager keeps all state under it.
        self.tmp = tempfile.mkdtemp()
        self.mgr = _make_manager(tmp_dir=self.tmp)

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _valid_ovpn(self):
        # Minimal client config containing only safe directives.
        return "client\ndev tun\nproto udp\nremote vpn.example.com 1194\n"

    def test_valid_name_and_conf_returns_ok_true(self):
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='my-vpn')
        self.assertTrue(result['ok'])

    def test_valid_conf_writes_file_at_correct_path(self):
        # File lands at <tmp>/connectivity/openvpn/<name>.ovpn.
        self.mgr.upload_openvpn(self._valid_ovpn(), name='my-vpn')
        expected = os.path.join(self.tmp, 'connectivity', 'openvpn', 'my-vpn.ovpn')
        self.assertTrue(os.path.isfile(expected), f'Expected file at {expected}')

    def test_valid_conf_file_has_mode_0600(self):
        # Config may embed credentials; must be owner-read/write only.
        self.mgr.upload_openvpn(self._valid_ovpn(), name='my-vpn')
        path = os.path.join(self.tmp, 'connectivity', 'openvpn', 'my-vpn.ovpn')
        mode = stat.S_IMODE(os.stat(path).st_mode)
        self.assertEqual(mode, 0o600, f'Expected 0600, got {oct(mode)}')

    def test_name_with_spaces_returns_ok_false(self):
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='my vpn')
        self.assertFalse(result['ok'])
        self.assertIn('error', result)

    def test_name_with_slash_returns_ok_false(self):
        # Path traversal attempt ('../evil') must be rejected.
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='../evil')
        self.assertFalse(result['ok'])

    def test_name_with_uppercase_returns_ok_false(self):
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='MyVPN')
        self.assertFalse(result['ok'])

    def test_name_too_long_returns_ok_false(self):
        # 33 chars — one past the apparent 32-char limit.
        long_name = 'a' * 33
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name=long_name)
        self.assertFalse(result['ok'])

    def test_valid_hyphenated_name_passes(self):
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='my-vpn')
        self.assertTrue(result['ok'])

    def test_valid_underscore_name_passes(self):
        result = self.mgr.upload_openvpn(self._valid_ovpn(), name='my_vpn')
        self.assertTrue(result['ok'])

    def test_default_name_default_passes(self):
        # Omitting name falls back to 'default' and stores default.ovpn.
        result = self.mgr.upload_openvpn(self._valid_ovpn())
        self.assertTrue(result['ok'])
        expected = os.path.join(self.tmp, 'connectivity', 'openvpn', 'default.ovpn')
        self.assertTrue(os.path.isfile(expected))

    def test_hooks_stripped_from_stored_file(self):
        # 'up' hook is removed from the persisted file; safe lines remain.
        conf = "client\ndev tun\nup /sbin/bad.sh\nproto udp\n"
        self.mgr.upload_openvpn(conf, name='clean')
        path = os.path.join(self.tmp, 'connectivity', 'openvpn', 'clean.ovpn')
        with open(path) as f:
            content = f.read()
        self.assertNotIn('up /sbin/bad.sh', content)
        self.assertIn('proto udp', content)
# ---------------------------------------------------------------------------
# get_status
# ---------------------------------------------------------------------------
class TestGetStatus(unittest.TestCase):
    """get_status(): service status dict, with all subprocess calls mocked."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _status(self):
        # Build a manager and fetch its status with every subprocess call
        # mocked to succeed, so no real system commands are executed.
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            return mgr.get_status()

    def test_returns_dict(self):
        self.assertIsInstance(self._status(), dict)

    def test_service_key_equals_connectivity(self):
        self.assertEqual(self._status()['service'], 'connectivity')

    def test_running_key_present_and_true(self):
        report = self._status()
        self.assertIn('running', report)
        self.assertTrue(report['running'])

    def test_exits_key_present(self):
        self.assertIn('exits', self._status())
# ---------------------------------------------------------------------------
# list_exits
# ---------------------------------------------------------------------------
class TestListExits(unittest.TestCase):
    """list_exits(): enumerates configurable exit types with status fields."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def test_returns_list(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            exits = mgr.list_exits()
            self.assertIsInstance(exits, list)

    def test_each_item_has_type_field(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            exits = mgr.list_exits()
            for item in exits:
                self.assertIn('type', item, f'Missing "type" in {item}')

    def test_each_item_has_status_fields(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            exits = mgr.list_exits()
            for item in exits:
                # _exit_status returns configured + iface_up (or subset)
                self.assertIn('configured', item, f'Missing "configured" in {item}')

    def test_default_not_in_exit_list(self):
        # 'default' is the implicit route, not a listed exit.
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            exits = mgr.list_exits()
            types = [e['type'] for e in exits]
            self.assertNotIn('default', types)

    def test_list_contains_wireguard_ext_openvpn_tor(self):
        # The three supported non-default exits must all be reported.
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            exits = mgr.list_exits()
            types = {e['type'] for e in exits}
            self.assertIn('wireguard_ext', types)
            self.assertIn('openvpn', types)
            self.assertIn('tor', types)
# ---------------------------------------------------------------------------
# set_peer_exit
# ---------------------------------------------------------------------------
class TestSetPeerExit(unittest.TestCase):
    """set_peer_exit(): validates peer/exit names and delegates to the registry."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _mgr(self, peer_registry=None):
        # Default registry mock: assignment succeeds, no peers listed.
        if peer_registry is None:
            peer_registry = MagicMock()
            peer_registry.set_peer_exit_via.return_value = True
            peer_registry.list_peers.return_value = []
        return _make_manager(tmp_dir=self.tmp, peer_registry=peer_registry)

    def test_valid_exit_type_returns_ok_true(self):
        mgr = self._mgr()
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.set_peer_exit('alice', 'wireguard_ext')
            self.assertTrue(result['ok'])

    def test_valid_exit_type_default_returns_ok_true(self):
        # 'default' is a valid target (clears any special routing).
        mgr = self._mgr()
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.set_peer_exit('alice', 'default')
            self.assertTrue(result['ok'])

    def test_invalid_exit_type_returns_ok_false(self):
        mgr = self._mgr()
        result = mgr.set_peer_exit('alice', 'shadowsocks')
        self.assertFalse(result['ok'])
        self.assertIn('error', result)

    def test_invalid_exit_type_error_mentions_type(self):
        # Error message should name the rejected exit type for the operator.
        mgr = self._mgr()
        result = mgr.set_peer_exit('alice', 'bad_type')
        self.assertIn('bad_type', result['error'])

    def test_calls_peer_registry_set_peer_exit_via_with_correct_args(self):
        pr = MagicMock()
        pr.set_peer_exit_via.return_value = True
        pr.list_peers.return_value = []
        mgr = self._mgr(peer_registry=pr)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            mgr.set_peer_exit('bob', 'openvpn')
            pr.set_peer_exit_via.assert_called_once_with('bob', 'openvpn')

    def test_peer_not_found_in_registry_returns_ok_false(self):
        pr = MagicMock()
        pr.set_peer_exit_via.return_value = False  # peer not found
        pr.list_peers.return_value = []
        mgr = self._mgr(peer_registry=pr)
        result = mgr.set_peer_exit('unknown-peer', 'tor')
        self.assertFalse(result['ok'])
        self.assertIn('error', result)

    def test_invalid_peer_name_returns_ok_false(self):
        # Peer names with spaces/punctuation are rejected up front.
        mgr = self._mgr()
        result = mgr.set_peer_exit('peer with spaces!', 'default')
        self.assertFalse(result['ok'])

    def test_no_peer_registry_returns_ok_false(self):
        # Without a registry there is nowhere to record the assignment.
        mgr = _make_manager(tmp_dir=self.tmp, peer_registry=None)
        result = mgr.set_peer_exit('alice', 'wireguard_ext')
        self.assertFalse(result['ok'])
        self.assertIn('error', result)
# ---------------------------------------------------------------------------
# get_peer_exits
# ---------------------------------------------------------------------------
class TestGetPeerExits(unittest.TestCase):
    """get_peer_exits(): maps peer name -> exit type via the peer registry."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def _mgr_with_registry(self, peers):
        # Manager wired to a mock registry that reports the given peer list.
        registry = MagicMock()
        registry.list_peers.return_value = peers
        return _make_manager(tmp_dir=self.tmp, peer_registry=registry), registry

    def test_returns_dict(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        self.assertIsInstance(mgr.get_peer_exits(), dict)

    def test_maps_peer_names_to_exit_types(self):
        mgr, _ = self._mgr_with_registry([
            {'peer': 'alice', 'exit_via': 'wireguard_ext'},
            {'peer': 'bob', 'exit_via': 'default'},
        ])
        mapping = mgr.get_peer_exits()
        self.assertEqual(mapping['alice'], 'wireguard_ext')
        self.assertEqual(mapping['bob'], 'default')

    def test_peer_without_exit_via_defaults_to_default(self):
        # Missing 'exit_via' key falls back to 'default'.
        mgr, _ = self._mgr_with_registry([{'peer': 'charlie'}])
        self.assertEqual(mgr.get_peer_exits()['charlie'], 'default')

    def test_calls_peer_registry_list_peers(self):
        mgr, registry = self._mgr_with_registry([])
        mgr.get_peer_exits()
        registry.list_peers.assert_called()

    def test_no_peer_registry_returns_empty_dict(self):
        mgr = _make_manager(tmp_dir=self.tmp, peer_registry=None)
        self.assertEqual(mgr.get_peer_exits(), {})

    def test_empty_peer_list_returns_empty_dict(self):
        mgr, _ = self._mgr_with_registry([])
        self.assertEqual(mgr.get_peer_exits(), {})
# ---------------------------------------------------------------------------
# apply_routes
# ---------------------------------------------------------------------------
class TestApplyRoutes(unittest.TestCase):
    """apply_routes(): policy-routing setup; must be best-effort, never raise."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp, ignore_errors=True)

    def test_returns_dict_with_ok_key(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.apply_routes()
            self.assertIsInstance(result, dict)
            self.assertIn('ok', result)

    def test_returns_ok_true_on_success(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.apply_routes()
            self.assertTrue(result['ok'])

    def test_calls_ensure_chains(self):
        # Chain setup must happen exactly once per apply_routes() call.
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(mgr, '_ensure_chains') as mock_ensure, \
                patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            mgr.apply_routes()
            mock_ensure.assert_called_once()

    def test_calls_subprocess_for_iptables(self):
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            mgr.apply_routes()
            self.assertTrue(mock_sp.run.called)
            # At least one call should involve 'iptables'
            calls_str = str(mock_sp.run.call_args_list)
            self.assertIn('iptables', calls_str)

    def test_subprocess_failure_is_non_fatal_returns_ok_true(self):
        """apply_routes must not raise even when every subprocess call fails."""
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = MagicMock(returncode=1, stdout='', stderr='error')
            result = mgr.apply_routes()
            # Must not raise; should still return a dict (ok may be True because
            # routing errors are logged as warnings, not propagated)
            self.assertIsInstance(result, dict)
            self.assertIn('ok', result)

    def test_ensure_chains_exception_is_non_fatal(self):
        """If _ensure_chains raises, apply_routes still returns a dict."""
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(mgr, '_ensure_chains', side_effect=RuntimeError('chain error')), \
                patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.apply_routes()
            self.assertIsInstance(result, dict)

    def test_peer_with_wireguard_ext_exit_generates_mark_rule(self):
        """Peers with a non-default exit should trigger _add_mark_rule calls."""
        pr = MagicMock()
        pr.list_peers.return_value = [
            {'peer': 'alice', 'exit_via': 'wireguard_ext'},
        ]
        pr.get_peer.return_value = {'peer': 'alice', 'ip': '172.20.0.50/32'}
        mgr = _make_manager(tmp_dir=self.tmp, peer_registry=pr)
        with patch.object(mgr, '_add_mark_rule') as mock_mark, \
                patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            mgr.apply_routes()
            mock_mark.assert_called()
            call_args = mock_mark.call_args[0]
            self.assertEqual(call_args[0], '172.20.0.50')  # IP without CIDR

    def test_peer_with_default_exit_skips_mark_rule(self):
        """Peers on default exit must not generate mark rules."""
        pr = MagicMock()
        pr.list_peers.return_value = [
            {'peer': 'bob', 'exit_via': 'default'},
        ]
        mgr = _make_manager(tmp_dir=self.tmp, peer_registry=pr)
        with patch.object(mgr, '_add_mark_rule') as mock_mark, \
                patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            mgr.apply_routes()
            mock_mark.assert_not_called()

    def test_rules_applied_count_in_result(self):
        # Result reports how many rules were applied, as an int.
        mgr = _make_manager(tmp_dir=self.tmp)
        with patch.object(cm_module, 'subprocess') as mock_sp:
            mock_sp.run.return_value = _mock_subprocess_ok()
            result = mgr.apply_routes()
            self.assertIn('rules_applied', result)
            self.assertIsInstance(result['rules_applied'], int)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
+510
View File
@@ -0,0 +1,510 @@
"""Tests for DDNSManager and DDNS provider classes."""
import os
import sys
import threading
import time
import unittest
from unittest.mock import MagicMock, patch, call
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api'))
from ddns_manager import (
DDNSManager,
DDNSProvider,
DDNSError,
PicNgoDDNS,
CloudflareDDNS,
DuckDNSDDNS,
NoIPDDNS,
FreeDNSDDNS,
_get_public_ip,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_response(status_code=200, json_data=None, text=''):
"""Build a minimal requests.Response-like mock."""
resp = MagicMock()
resp.ok = (200 <= status_code < 300)
resp.status_code = status_code
resp.json.return_value = json_data or {}
resp.text = text
return resp
def _make_config_manager(ddns_cfg=None, domain_cfg=None):
"""Return a mock config_manager whose get_identity() returns a useful dict."""
cm = MagicMock()
if ddns_cfg is not None:
identity = {'domain': {'ddns': ddns_cfg}}
elif domain_cfg is not None:
identity = {'domain': domain_cfg}
else:
identity = {}
cm.get_identity.return_value = identity
return cm
# ---------------------------------------------------------------------------
# PicNgoDDNS tests
# ---------------------------------------------------------------------------
class TestPicNgoDDNSRegister(unittest.TestCase):
    """PicNgoDDNS.register() calls the correct URL with the correct body."""

    def test_register_calls_correct_url(self):
        provider = PicNgoDDNS(api_base_url='https://ddns.example.com')
        mock_resp = _make_response(200, json_data={'token': 'tok123', 'subdomain': 'alpha'})
        with patch('requests.post', return_value=mock_resp) as mock_post:
            result = provider.register('alpha', '1.2.3.4')
            # One POST to <base>/api/v1/register with the name/ip JSON body;
            # the parsed JSON payload is returned to the caller.
            mock_post.assert_called_once()
            args, kwargs = mock_post.call_args
            self.assertEqual(args[0], 'https://ddns.example.com/api/v1/register')
            self.assertEqual(kwargs['json'], {'name': 'alpha', 'ip': '1.2.3.4'})
            self.assertEqual(result, {'token': 'tok123', 'subdomain': 'alpha'})

    def test_register_raises_ddns_error_on_http_error(self):
        # HTTP 5xx is surfaced as DDNSError, not a raw requests error.
        provider = PicNgoDDNS()
        mock_resp = _make_response(500, text='Internal Server Error')
        with patch('requests.post', return_value=mock_resp):
            with self.assertRaises(DDNSError):
                provider.register('alpha', '1.2.3.4')

    def test_register_no_token_in_header(self):
        """register() must NOT send an Authorization header (no token yet)."""
        provider = PicNgoDDNS()
        mock_resp = _make_response(200, json_data={'token': 't', 'subdomain': 'x'})
        with patch('requests.post', return_value=mock_resp) as mock_post:
            provider.register('x', '1.2.3.4')
            _, kwargs = mock_post.call_args
            self.assertNotIn('Authorization', kwargs.get('headers', {}))
class TestPicNgoDDNSUpdate(unittest.TestCase):
    """PicNgoDDNS.update() calls the correct URL with Authorization header."""

    def test_update_uses_bearer_token(self):
        provider = PicNgoDDNS(api_base_url='https://ddns.example.com')
        mock_resp = _make_response(200)
        with patch('requests.put', return_value=mock_resp) as mock_put:
            result = provider.update('mytoken', '5.6.7.8')
            # One PUT to <base>/api/v1/update with Bearer auth + ip body.
            mock_put.assert_called_once()
            args, kwargs = mock_put.call_args
            self.assertEqual(args[0], 'https://ddns.example.com/api/v1/update')
            self.assertIn('Authorization', kwargs['headers'])
            self.assertEqual(kwargs['headers']['Authorization'], 'Bearer mytoken')
            self.assertEqual(kwargs['json'], {'ip': '5.6.7.8'})
            self.assertTrue(result)

    def test_update_raises_ddns_error_on_failure(self):
        # 403 (bad token) must surface as DDNSError.
        provider = PicNgoDDNS()
        mock_resp = _make_response(403, text='Forbidden')
        with patch('requests.put', return_value=mock_resp):
            with self.assertRaises(DDNSError):
                provider.update('badtoken', '1.2.3.4')
class TestPicNgoDDNSChallenges(unittest.TestCase):
    """PicNgoDDNS.dns_challenge_create/delete call correct endpoints."""

    def test_dns_challenge_create_calls_post(self):
        provider = PicNgoDDNS(api_base_url='https://ddns.example.com')
        mock_resp = _make_response(200)
        with patch('requests.post', return_value=mock_resp) as mock_post:
            result = provider.dns_challenge_create('tok', '_acme.alpha.pic.ngo', 'abc123')
            # POST to /api/v1/dns-challenge with fqdn+value and Bearer auth.
            mock_post.assert_called_once()
            args, kwargs = mock_post.call_args
            self.assertEqual(args[0], 'https://ddns.example.com/api/v1/dns-challenge')
            self.assertEqual(kwargs['json'], {'fqdn': '_acme.alpha.pic.ngo', 'value': 'abc123'})
            self.assertEqual(kwargs['headers']['Authorization'], 'Bearer tok')
            self.assertTrue(result)

    def test_dns_challenge_delete_calls_delete(self):
        provider = PicNgoDDNS(api_base_url='https://ddns.example.com')
        mock_resp = _make_response(200)
        with patch('requests.delete', return_value=mock_resp) as mock_del:
            result = provider.dns_challenge_delete('tok', '_acme.alpha.pic.ngo')
            # DELETE to the same endpoint; body carries only the fqdn.
            mock_del.assert_called_once()
            args, kwargs = mock_del.call_args
            self.assertEqual(args[0], 'https://ddns.example.com/api/v1/dns-challenge')
            self.assertEqual(kwargs['json'], {'fqdn': '_acme.alpha.pic.ngo'})
            self.assertEqual(kwargs['headers']['Authorization'], 'Bearer tok')
            self.assertTrue(result)

    def test_dns_challenge_create_raises_on_error(self):
        provider = PicNgoDDNS()
        mock_resp = _make_response(500, text='error')
        with patch('requests.post', return_value=mock_resp):
            with self.assertRaises(DDNSError):
                provider.dns_challenge_create('tok', 'fqdn', 'val')

    def test_dns_challenge_delete_raises_on_error(self):
        provider = PicNgoDDNS()
        mock_resp = _make_response(404, text='not found')
        with patch('requests.delete', return_value=mock_resp):
            with self.assertRaises(DDNSError):
                provider.dns_challenge_delete('tok', 'fqdn')
# ---------------------------------------------------------------------------
# DDNSManager.get_provider() tests
# ---------------------------------------------------------------------------
class TestGetProvider(unittest.TestCase):
    """DDNSManager.get_provider() returns the correct provider class."""

    def test_returns_pic_ngo_provider(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo'})
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, PicNgoDDNS)

    def test_returns_none_when_no_ddns_config(self):
        # No domain/ddns section at all -> no provider.
        cm = _make_config_manager()  # empty identity
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsNone(provider)

    def test_returns_none_when_no_provider_key(self):
        # ddns section exists but names no provider -> None.
        cm = _make_config_manager(ddns_cfg={})
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsNone(provider)

    def test_returns_cloudflare_provider(self):
        cm = _make_config_manager(ddns_cfg={
            'provider': 'cloudflare',
            'api_token': 'cf_tok',
            'zone_id': 'zid',
        })
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, CloudflareDDNS)

    def test_returns_duckdns_provider(self):
        cm = _make_config_manager(ddns_cfg={
            'provider': 'duckdns',
            'token': 'duck_tok',
            'domain': 'mypic',
        })
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, DuckDNSDDNS)

    def test_returns_noip_provider(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'noip'})
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, NoIPDDNS)

    def test_returns_freedns_provider(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'freedns'})
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, FreeDNSDDNS)

    def test_returns_none_for_unknown_provider(self):
        # Unrecognized provider names must fail soft (None), not raise.
        cm = _make_config_manager(ddns_cfg={'provider': 'nonexistent'})
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsNone(provider)

    def test_uses_custom_api_base_url(self):
        # A configured api_base_url overrides the provider's default.
        cm = _make_config_manager(ddns_cfg={
            'provider': 'pic_ngo',
            'api_base_url': 'https://custom.example.com',
        })
        mgr = DDNSManager(config_manager=cm)
        provider = mgr.get_provider()
        self.assertIsInstance(provider, PicNgoDDNS)
        self.assertEqual(provider.api_base_url, 'https://custom.example.com')
# ---------------------------------------------------------------------------
# DDNSManager.update_ip() tests
# ---------------------------------------------------------------------------
class TestUpdateIp(unittest.TestCase):
    """DDNSManager.update_ip() calls provider.update() only when IP changed."""

    def _make_manager_with_mock_provider(self, token='tok', last_ip=None):
        # Manager whose provider is fully mocked; _last_ip seeds the cache.
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': token})
        mgr = DDNSManager(config_manager=cm)
        mgr._last_ip = last_ip
        mock_provider = MagicMock()
        mock_provider.update.return_value = True
        mgr.get_provider = MagicMock(return_value=mock_provider)
        return mgr, mock_provider

    def test_update_when_ip_changed(self):
        mgr, mock_provider = self._make_manager_with_mock_provider(last_ip='1.1.1.1')
        with patch('ddns_manager._get_public_ip', return_value='2.2.2.2'):
            mgr.update_ip()
            # New IP pushed to the provider and cached locally.
            mock_provider.update.assert_called_once_with('tok', '2.2.2.2')
            self.assertEqual(mgr._last_ip, '2.2.2.2')

    def test_skips_update_when_ip_unchanged(self):
        mgr, mock_provider = self._make_manager_with_mock_provider(last_ip='3.3.3.3')
        with patch('ddns_manager._get_public_ip', return_value='3.3.3.3'):
            mgr.update_ip()
            mock_provider.update.assert_not_called()
            self.assertEqual(mgr._last_ip, '3.3.3.3')

    def test_skips_update_when_no_provider(self):
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        mgr._last_ip = None
        # Should not raise, just silently skip
        with patch('ddns_manager._get_public_ip', return_value='1.2.3.4'):
            mgr.update_ip()

    def test_skips_update_when_ip_unreachable(self):
        # _get_public_ip returning None (network down) means no update call.
        mgr, mock_provider = self._make_manager_with_mock_provider(last_ip=None)
        with patch('ddns_manager._get_public_ip', return_value=None):
            mgr.update_ip()
            mock_provider.update.assert_not_called()

    def test_last_ip_not_updated_when_provider_returns_false(self):
        mgr, mock_provider = self._make_manager_with_mock_provider(last_ip='1.1.1.1')
        mock_provider.update.return_value = False
        with patch('ddns_manager._get_public_ip', return_value='9.9.9.9'):
            mgr.update_ip()
            # IP should not be cached when provider says False
            self.assertEqual(mgr._last_ip, '1.1.1.1')

    def test_ddns_error_is_caught_not_propagated(self):
        mgr, mock_provider = self._make_manager_with_mock_provider(last_ip='1.1.1.1')
        mock_provider.update.side_effect = DDNSError("server error")
        with patch('ddns_manager._get_public_ip', return_value='5.5.5.5'):
            # Should not raise
            mgr.update_ip()
# ---------------------------------------------------------------------------
# DDNSManager.register() tests
# ---------------------------------------------------------------------------
class TestRegister(unittest.TestCase):
    """DDNSManager.register(): delegates to the provider, persists the token."""

    def test_register_stores_token_in_config(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo'})
        mgr = DDNSManager(config_manager=cm)
        mock_provider = MagicMock()
        mock_provider.register.return_value = {'token': 'new_tok', 'subdomain': 'alpha'}
        mgr.get_provider = MagicMock(return_value=mock_provider)
        result = mgr.register('alpha', '1.2.3.4')
        self.assertEqual(result['token'], 'new_tok')
        # set_identity_field('domain', ...) should have been called
        cm.set_identity_field.assert_called_once()
        field_name, field_value = cm.set_identity_field.call_args[0]
        self.assertEqual(field_name, 'domain')
        # The returned token must be written under domain.ddns.token.
        self.assertEqual(field_value['ddns']['token'], 'new_tok')

    def test_register_raises_when_no_provider(self):
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        with self.assertRaises(DDNSError):
            mgr.register('alpha', '1.2.3.4')
# ---------------------------------------------------------------------------
# DDNSManager.dns_challenge_create/delete delegation tests
# ---------------------------------------------------------------------------
class TestDnsChallenges(unittest.TestCase):
    """DDNSManager dns_challenge_create/delete delegate to the provider."""

    def _make_manager(self):
        # Manager with a mocked provider; config carries the auth token.
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        mock_provider = MagicMock()
        mock_provider.dns_challenge_create.return_value = True
        mock_provider.dns_challenge_delete.return_value = True
        mgr.get_provider = MagicMock(return_value=mock_provider)
        return mgr, mock_provider

    def test_dns_challenge_create_delegates(self):
        # Token from config is prepended to the provider call.
        mgr, mock_provider = self._make_manager()
        mgr.dns_challenge_create('_acme.alpha.pic.ngo', 'val123')
        mock_provider.dns_challenge_create.assert_called_once_with(
            'tok', '_acme.alpha.pic.ngo', 'val123'
        )

    def test_dns_challenge_delete_delegates(self):
        mgr, mock_provider = self._make_manager()
        mgr.dns_challenge_delete('_acme.alpha.pic.ngo')
        mock_provider.dns_challenge_delete.assert_called_once_with(
            'tok', '_acme.alpha.pic.ngo'
        )

    def test_dns_challenge_create_raises_when_no_provider(self):
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        with self.assertRaises(DDNSError):
            mgr.dns_challenge_create('fqdn', 'val')

    def test_dns_challenge_delete_raises_when_no_provider(self):
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        with self.assertRaises(DDNSError):
            mgr.dns_challenge_delete('fqdn')
# ---------------------------------------------------------------------------
# Background heartbeat thread tests
# ---------------------------------------------------------------------------
class TestHeartbeat(unittest.TestCase):
    """Background heartbeat thread starts, runs, and can be stopped cleanly."""

    def test_heartbeat_starts(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        mgr.update_ip = MagicMock()  # avoid real network
        mgr.start_heartbeat()
        try:
            self.assertIsNotNone(mgr._heartbeat_thread)
            self.assertTrue(mgr._heartbeat_thread.is_alive())
        finally:
            # Always stop so a failing assert doesn't leak a live thread.
            mgr.stop_heartbeat()

    def test_heartbeat_can_be_stopped(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        mgr.update_ip = MagicMock()
        mgr.start_heartbeat()
        mgr.stop_heartbeat()
        # Thread should be dead after stop
        if mgr._heartbeat_thread is not None:
            self.assertFalse(mgr._heartbeat_thread.is_alive())

    def test_start_heartbeat_is_idempotent(self):
        """Calling start_heartbeat() twice should not create a second thread."""
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        mgr.update_ip = MagicMock()
        mgr.start_heartbeat()
        thread1 = mgr._heartbeat_thread
        mgr.start_heartbeat()
        thread2 = mgr._heartbeat_thread
        try:
            # Same thread object -> no second worker was spawned.
            self.assertIs(thread1, thread2)
        finally:
            mgr.stop_heartbeat()

    def test_heartbeat_calls_update_ip(self):
        """Heartbeat loop must invoke update_ip() at least once."""
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        called_event = threading.Event()

        def _fake_update_ip():
            called_event.set()

        mgr.update_ip = _fake_update_ip
        mgr.start_heartbeat()
        # Wait up to 3s for the background thread to make its first call.
        called = called_event.wait(timeout=3)
        mgr.stop_heartbeat()
        self.assertTrue(called, "update_ip() was not called within 3 seconds")

    def test_heartbeat_survives_exception_in_update_ip(self):
        """An exception in update_ip() must not crash the heartbeat thread."""
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo', 'token': 'tok'})
        mgr = DDNSManager(config_manager=cm)
        call_count = [0]
        survived_event = threading.Event()

        def _flaky_update_ip():
            # First call raises; the event only fires on a later call,
            # proving the loop kept running after the exception.
            call_count[0] += 1
            if call_count[0] == 1:
                raise RuntimeError("transient failure")
            survived_event.set()

        mgr.update_ip = _flaky_update_ip
        # Patch the interval to be 0 so the loop spins immediately
        import ddns_manager as _dm
        original_interval = _dm._HEARTBEAT_INTERVAL
        _dm._HEARTBEAT_INTERVAL = 0
        try:
            mgr.start_heartbeat()
            survived = survived_event.wait(timeout=5)
            mgr.stop_heartbeat()
            self.assertTrue(survived, "Thread did not survive exception in update_ip()")
        finally:
            # Restore the module-level interval even if the test fails.
            _dm._HEARTBEAT_INTERVAL = original_interval
# ---------------------------------------------------------------------------
# get_status() and test_connectivity() smoke tests
# ---------------------------------------------------------------------------
class TestGetStatus(unittest.TestCase):
    """get_status() and test_connectivity() smoke tests for DDNSManager."""

    def test_get_status_returns_dict(self):
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo'})
        mgr = DDNSManager(config_manager=cm)
        status = mgr.get_status()
        self.assertIn('service', status)
        self.assertEqual(status['service'], 'ddns')
        self.assertIn('provider', status)
        self.assertEqual(status['provider'], 'pic_ngo')

    def test_get_status_no_config(self):
        # With no ddns config the provider field is reported as None.
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        status = mgr.get_status()
        self.assertIsNone(status['provider'])

    def test_test_connectivity_no_provider(self):
        cm = _make_config_manager()
        mgr = DDNSManager(config_manager=cm)
        result = mgr.test_connectivity()
        self.assertFalse(result['success'])

    def test_test_connectivity_with_provider(self):
        # Public-IP lookup mocked out so no real network is touched.
        cm = _make_config_manager(ddns_cfg={'provider': 'pic_ngo'})
        mgr = DDNSManager(config_manager=cm)
        with patch('ddns_manager._get_public_ip', return_value='1.2.3.4'):
            result = mgr.test_connectivity()
            self.assertTrue(result['success'])
            self.assertEqual(result['public_ip'], '1.2.3.4')
# ---------------------------------------------------------------------------
# _get_public_ip helper tests
# ---------------------------------------------------------------------------
class TestGetPublicIp(unittest.TestCase):
    """Behaviour of the _get_public_ip() helper on success and failure."""

    @staticmethod
    def _response(ok, text=''):
        # Build a minimal requests-like response mock.
        resp = MagicMock()
        resp.ok = ok
        resp.text = text
        return resp

    def test_returns_ip_on_success(self):
        # Surrounding whitespace in the body must be stripped.
        with patch('requests.get', return_value=self._response(True, ' 1.2.3.4 ')):
            self.assertEqual(_get_public_ip(), '1.2.3.4')

    def test_returns_none_on_failure(self):
        with patch('requests.get', side_effect=Exception('network error')):
            self.assertIsNone(_get_public_ip())

    def test_returns_none_on_non_ok_response(self):
        with patch('requests.get', return_value=self._response(False)):
            self.assertIsNone(_get_public_ip())
# Allow running this test module directly (python <file>.py) as well as via pytest.
if __name__ == '__main__':
    unittest.main()
+68 -17
View File
@@ -560,7 +560,8 @@ class TestCellRules(unittest.TestCase):
with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \
patch.object(firewall_manager, '_get_cell_api_ip', return_value=self._FAKE_API_IP), \
patch.object(firewall_manager, '_get_caddy_container_ip', return_value=self._FAKE_CADDY_IP), \
patch.object(firewall_manager, '_get_dns_container_ip', return_value=self._FAKE_DNS_IP):
patch.object(firewall_manager, '_get_dns_container_ip', return_value=self._FAKE_DNS_IP), \
patch.object(firewall_manager, 'ensure_forward_stateful', return_value=True):
firewall_manager.apply_cell_rules(cell_name, vpn_subnet, inbound_services)
return [c for c in calls_made if 'iptables' in c]
@@ -650,7 +651,8 @@ class TestCellRules(unittest.TestCase):
with patch.object(firewall_manager, '_wg_exec', side_effect=fake_wg_exec), \
patch.object(firewall_manager, '_get_cell_api_ip', return_value='172.20.0.10'), \
patch.object(firewall_manager, '_get_caddy_container_ip', return_value='172.20.0.2'), \
patch.object(firewall_manager, '_get_dns_container_ip', return_value='172.20.0.3'):
patch.object(firewall_manager, '_get_dns_container_ip', return_value='172.20.0.3'), \
patch.object(firewall_manager, 'ensure_forward_stateful', return_value=True):
firewall_manager.apply_cell_rules('office', '10.0.1.0/24', [])
# The API-sync ACCEPT must be the LAST -I FORWARD insertion so it sits at position 1
@@ -793,7 +795,7 @@ class TestCellRules(unittest.TestCase):
class TestEnsureCellApiDnat(unittest.TestCase):
"""Tests for ensure_cell_api_dnat — DNAT wg0:3000 → cell-api:3000."""
"""Tests for ensure_cell_api_dnat — DNAT wg0:3000 (scoped) → cell-api:3000."""
def _wg_exec_no_existing_rules(self, args):
r = MagicMock()
@@ -815,7 +817,8 @@ class TestEnsureCellApiDnat(unittest.TestCase):
return r
def test_dnat_rules_added_when_not_present(self):
with patch.object(firewall_manager, '_run', return_value=self._inspect_ok()), \
with patch.object(firewall_manager, '_get_wg_server_ip', return_value='10.0.0.1'), \
patch.object(firewall_manager, '_run', return_value=self._inspect_ok()), \
patch.object(firewall_manager, '_wg_exec',
side_effect=self._wg_exec_no_existing_rules) as wg_mock:
result = firewall_manager.ensure_cell_api_dnat()
@@ -825,8 +828,23 @@ class TestEnsureCellApiDnat(unittest.TestCase):
dnat_adds = [a for a in calls_args if 'DNAT' in a and '-A' in a]
self.assertTrue(len(dnat_adds) >= 1, 'DNAT -A rule must be added')
def test_dnat_is_scoped_to_server_ip(self):
"""DNAT rule must include -d <server_ip> to avoid intercepting cross-cell traffic."""
with patch.object(firewall_manager, '_get_wg_server_ip', return_value='10.0.0.1'), \
patch.object(firewall_manager, '_run', return_value=self._inspect_ok()), \
patch.object(firewall_manager, '_wg_exec',
side_effect=self._wg_exec_no_existing_rules) as wg_mock:
firewall_manager.ensure_cell_api_dnat()
all_args = [c.args[0] for c in wg_mock.call_args_list]
dnat_adds = [a for a in all_args if 'DNAT' in a and '-A' in a]
for rule in dnat_adds:
self.assertIn('10.0.0.1', rule, 'DNAT rule must be scoped to server IP')
self.assertIn('-d', rule, 'DNAT rule must use -d to scope to server IP')
def test_dnat_skipped_if_already_present(self):
with patch.object(firewall_manager, '_run', return_value=self._inspect_ok()), \
with patch.object(firewall_manager, '_get_wg_server_ip', return_value='10.0.0.1'), \
patch.object(firewall_manager, '_run', return_value=self._inspect_ok()), \
patch.object(firewall_manager, '_wg_exec',
side_effect=self._wg_exec_all_rules_exist) as wg_mock:
result = firewall_manager.ensure_cell_api_dnat()
@@ -836,16 +854,23 @@ class TestEnsureCellApiDnat(unittest.TestCase):
add_calls = [a for a in calls_args if '-A' in a or '-I' in a]
self.assertEqual(len(add_calls), 0, 'No rules should be added when they already exist')
def test_returns_false_when_wg_server_ip_not_found(self):
with patch.object(firewall_manager, '_get_wg_server_ip', return_value=None):
result = firewall_manager.ensure_cell_api_dnat()
self.assertFalse(result)
def test_returns_false_when_cell_api_not_found(self):
r = MagicMock()
r.returncode = 0
r.stdout = ''
with patch.object(firewall_manager, '_run', return_value=r):
with patch.object(firewall_manager, '_get_wg_server_ip', return_value='10.0.0.1'), \
patch.object(firewall_manager, '_run', return_value=r):
result = firewall_manager.ensure_cell_api_dnat()
self.assertFalse(result)
def test_returns_false_on_exception(self):
with patch.object(firewall_manager, '_run', side_effect=RuntimeError('docker gone')):
with patch.object(firewall_manager, '_get_wg_server_ip', return_value='10.0.0.1'), \
patch.object(firewall_manager, '_run', side_effect=RuntimeError('docker gone')):
result = firewall_manager.ensure_cell_api_dnat()
self.assertFalse(result)
@@ -934,40 +959,66 @@ class TestReconcileStale(unittest.TestCase):
# ---------------------------------------------------------------------------
class TestEnsureForwardStateful(unittest.TestCase):
"""ensure_forward_stateful must insert ESTABLISHED,RELATED ACCEPT only once."""
"""ensure_forward_stateful deletes any existing copies then re-inserts at position 1."""
def _make_exec(self, already_present=False):
def _make_exec(self, existing_copies=0):
"""Return (calls_list, fake_wg_exec).
The fake simulates *existing_copies* existing ESTABLISHED,RELATED rules.
Each -D call with returncode 0 "removes" one copy; once they are all gone
subsequent -D calls return 1 (rule not found). All other calls succeed.
"""
calls = []
state = {'remaining': existing_copies}
def fake_wg_exec(args):
calls.append(args)
r = MagicMock()
# -C (check) returns 0 if present, 1 if not
if '-C' in args:
r.returncode = 0 if already_present else 1
r.stdout = ''
if '-D' in args:
if state['remaining'] > 0:
state['remaining'] -= 1
r.returncode = 0 # deletion succeeded
else:
r.returncode = 1 # nothing left to delete
else:
r.returncode = 0
r.stdout = ''
return r
return calls, fake_wg_exec
def test_inserts_rule_when_not_present(self):
calls, fake = self._make_exec(already_present=False)
"""With no pre-existing rule the -D loop exits immediately and -I inserts once."""
calls, fake = self._make_exec(existing_copies=0)
with patch.object(firewall_manager, '_wg_exec', side_effect=fake):
result = firewall_manager.ensure_forward_stateful()
self.assertTrue(result)
# Exactly one -D attempt (returns 1 straight away, loop body never ran)
delete_calls = [c for c in calls if '-D' in c]
self.assertEqual(len(delete_calls), 1)
# Exactly one -I insert
insert_calls = [c for c in calls if '-I' in c]
self.assertEqual(len(insert_calls), 1)
flat = ' '.join(insert_calls[0])
self.assertIn('ESTABLISHED,RELATED', flat)
self.assertIn('ACCEPT', flat)
def test_skips_insert_when_already_present(self):
calls, fake = self._make_exec(already_present=True)
def test_deletes_existing_and_reinserts(self):
"""With 2 stale copies the loop deletes both, then inserts once at position 1."""
calls, fake = self._make_exec(existing_copies=2)
with patch.object(firewall_manager, '_wg_exec', side_effect=fake):
result = firewall_manager.ensure_forward_stateful()
self.assertTrue(result)
# Two successful -D calls to drain existing rules, one more that fails
delete_calls = [c for c in calls if '-D' in c]
self.assertEqual(len(delete_calls), 3) # 2 succeed + 1 fails (loop exit)
# Exactly one -I insert anchored at position 1
insert_calls = [c for c in calls if '-I' in c]
self.assertEqual(len(insert_calls), 0, "Must not insert duplicate rule")
self.assertEqual(len(insert_calls), 1)
flat = ' '.join(insert_calls[0])
self.assertIn('1', flat)
self.assertIn('ESTABLISHED,RELATED', flat)
self.assertIn('ACCEPT', flat)
def test_apply_cell_rules_calls_ensure_forward_stateful(self):
"""apply_cell_rules must call ensure_forward_stateful so replies are never dropped."""
File diff suppressed because it is too large Load Diff
+301
View File
@@ -0,0 +1,301 @@
#!/usr/bin/env python3
"""
Unit tests for SetupManager (api/setup_manager.py).
Config manager and auth manager are injected as MagicMock objects so no
filesystem access or Docker calls are needed.
"""
import os
import sys
from pathlib import Path
from unittest.mock import MagicMock, call, patch
import pytest
sys.path.insert(0, str(Path(__file__).parent.parent / 'api'))
from setup_manager import SetupManager, AVAILABLE_SERVICES, AVAILABLE_TIMEZONES
# ── fixtures ──────────────────────────────────────────────────────────────────
@pytest.fixture
def mock_config_manager():
    """ConfigManager stand-in; identity starts empty (setup not complete)."""
    fake = MagicMock()
    fake.get_identity.return_value = {}
    return fake


@pytest.fixture
def mock_auth_manager():
    """AuthManager stand-in whose create_user succeeds by default."""
    fake = MagicMock()
    fake.create_user.return_value = True
    return fake


@pytest.fixture
def setup_manager(mock_config_manager, mock_auth_manager):
    """A SetupManager wired to the two mock collaborators."""
    return SetupManager(mock_config_manager, mock_auth_manager)
# ── valid payload helper ───────────────────────────────────────────────────────
def _valid_payload(**overrides):
base = {
'cell_name': 'mycel',
'password': 'SecurePass1!',
'domain_mode': 'lan',
'timezone': 'UTC',
'services_enabled': ['wireguard'],
'ddns_provider': 'none',
}
base.update(overrides)
return base
# ── is_setup_complete ─────────────────────────────────────────────────────────
def test_is_setup_complete_missing_key_returns_false(setup_manager, mock_config_manager):
    """Identity with no 'setup_complete' key counts as incomplete."""
    mock_config_manager.get_identity.return_value = {}
    outcome = setup_manager.is_setup_complete()
    assert outcome is False


def test_is_setup_complete_false_value_returns_false(setup_manager, mock_config_manager):
    """An explicit False value also counts as incomplete."""
    mock_config_manager.get_identity.return_value = {'setup_complete': False}
    outcome = setup_manager.is_setup_complete()
    assert outcome is False


def test_is_setup_complete_true_value_returns_true(setup_manager, mock_config_manager):
    """Only an explicit True marks setup as done."""
    mock_config_manager.get_identity.return_value = {'setup_complete': True}
    outcome = setup_manager.is_setup_complete()
    assert outcome is True
# ── validate_cell_name ────────────────────────────────────────────────────────
@pytest.mark.parametrize('name', ['mycel', 'my-cel', 'a1', 'abc-123-xyz'])
def test_validate_cell_name_accepts_valid_names(setup_manager, name):
    """Well-formed names produce an empty error list."""
    assert setup_manager.validate_cell_name(name) == []


def test_validate_cell_name_rejects_empty_string(setup_manager):
    """Empty input is rejected with a message mentioning 'required'."""
    messages = setup_manager.validate_cell_name('')
    assert messages
    assert any('required' in msg.lower() for msg in messages)


def test_validate_cell_name_rejects_starts_with_digit(setup_manager):
    assert setup_manager.validate_cell_name('1abc') != []


def test_validate_cell_name_rejects_starts_with_hyphen(setup_manager):
    assert setup_manager.validate_cell_name('-abc') != []


def test_validate_cell_name_rejects_ends_with_hyphen(setup_manager):
    assert setup_manager.validate_cell_name('abc-') != []


def test_validate_cell_name_rejects_uppercase(setup_manager):
    assert setup_manager.validate_cell_name('MyCell') != []


def test_validate_cell_name_rejects_underscore(setup_manager):
    assert setup_manager.validate_cell_name('my_cell') != []


def test_validate_cell_name_rejects_dot(setup_manager):
    assert setup_manager.validate_cell_name('my.cell') != []


def test_validate_cell_name_rejects_too_short_single_char(setup_manager):
    # Single character: regex requires at least 2 chars (start + 1-30 more)
    assert setup_manager.validate_cell_name('a') != []


def test_validate_cell_name_rejects_too_long(setup_manager):
    # 32 lowercase letters — one over the 31-char limit
    assert setup_manager.validate_cell_name('a' * 32) != []


def test_validate_cell_name_accepts_maximum_length(setup_manager):
    # 31 chars: 'a' + 30 more lowercase = exactly at limit
    assert setup_manager.validate_cell_name('a' + 'b' * 30) == []
# ── validate_password ─────────────────────────────────────────────────────────
def test_validate_password_accepts_valid_password(setup_manager):
    """A password meeting every rule yields no errors."""
    assert setup_manager.validate_password('SecurePass1!') == []


def test_validate_password_rejects_too_short(setup_manager):
    messages = setup_manager.validate_password('Short1!')
    assert messages
    assert any('12' in msg or 'least' in msg.lower() for msg in messages)


def test_validate_password_rejects_no_uppercase(setup_manager):
    messages = setup_manager.validate_password('securepass1!')
    assert messages
    assert any('uppercase' in msg.lower() for msg in messages)


def test_validate_password_rejects_no_lowercase(setup_manager):
    messages = setup_manager.validate_password('SECUREPASS1!')
    assert messages
    assert any('lowercase' in msg.lower() for msg in messages)


def test_validate_password_rejects_no_digit(setup_manager):
    messages = setup_manager.validate_password('SecurePassword!')
    assert messages
    assert any('digit' in msg.lower() for msg in messages)
# ── complete_setup ────────────────────────────────────────────────────────────
def test_complete_setup_returns_error_when_cell_name_invalid(setup_manager):
    """A bad cell name fails the whole setup with errors reported."""
    outcome = setup_manager.complete_setup(_valid_payload(cell_name='1bad'))
    assert outcome['success'] is False
    assert outcome['errors']


def test_complete_setup_returns_error_when_password_invalid(setup_manager):
    outcome = setup_manager.complete_setup(_valid_payload(password='weak'))
    assert outcome['success'] is False
    assert outcome['errors']


def test_complete_setup_returns_error_when_domain_mode_invalid(setup_manager):
    outcome = setup_manager.complete_setup(_valid_payload(domain_mode='ftp'))
    assert outcome['success'] is False
    assert any('domain_mode' in e for e in outcome['errors'])
def test_complete_setup_calls_create_user_with_correct_args(
        setup_manager, mock_auth_manager, mock_config_manager, tmp_path):
    # Happy path: the initial admin account is created exactly once with
    # the payload's password and the fixed 'admin' username/role.
    mock_config_manager.get_identity.return_value = {}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        result = setup_manager.complete_setup(_valid_payload())
    mock_auth_manager.create_user.assert_called_once_with(
        username='admin',
        password='SecurePass1!',
        role='admin',
    )


def test_complete_setup_calls_set_identity_field_for_each_field(
        setup_manager, mock_config_manager, mock_auth_manager, tmp_path):
    # Every wizard field must be persisted via set_identity_field.
    mock_config_manager.get_identity.return_value = {}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        setup_manager.complete_setup(_valid_payload())
    calls = mock_config_manager.set_identity_field.call_args_list
    # First positional argument of each call is the field name.
    field_names = [c[0][0] for c in calls]
    for expected in ('cell_name', 'domain_mode', 'timezone', 'services_enabled', 'ddns_provider'):
        assert expected in field_names, f"set_identity_field not called for '{expected}'"


def test_complete_setup_marks_setup_complete_last(
        setup_manager, mock_config_manager, mock_auth_manager, tmp_path):
    """setup_complete must be the final set_identity_field call."""
    # Ordering matters: if the flag were written before the other fields, a
    # crash mid-setup could leave a half-configured cell marked as done.
    mock_config_manager.get_identity.return_value = {}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        setup_manager.complete_setup(_valid_payload())
    calls = mock_config_manager.set_identity_field.call_args_list
    last_call = calls[-1]
    assert last_call == call('setup_complete', True)


def test_complete_setup_returns_success_redirect_on_valid_payload(
        setup_manager, mock_config_manager, mock_auth_manager, tmp_path):
    # The exact response shape is part of the API contract with the web UI.
    mock_config_manager.get_identity.return_value = {}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        result = setup_manager.complete_setup(_valid_payload())
    assert result == {'success': True, 'redirect': '/login'}


def test_complete_setup_returns_error_when_already_complete(
        setup_manager, mock_config_manager, tmp_path):
    """If setup is already done when the lock-protected re-check runs, return error."""
    # complete_setup calls is_setup_complete() exactly once — inside the lock.
    # Returning True there triggers the "already completed" guard.
    mock_config_manager.get_identity.return_value = {'setup_complete': True}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        result = setup_manager.complete_setup(_valid_payload())
    assert result['success'] is False
    assert any('already' in e.lower() for e in result['errors'])


def test_complete_setup_does_not_persist_fields_when_already_complete(
        setup_manager, mock_config_manager, mock_auth_manager, tmp_path):
    """No side-effects (no create_user, no set_identity_field) when already done."""
    mock_config_manager.get_identity.return_value = {'setup_complete': True}
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        setup_manager.complete_setup(_valid_payload())
    mock_auth_manager.create_user.assert_not_called()
    mock_config_manager.set_identity_field.assert_not_called()


def test_complete_setup_returns_error_when_create_user_fails(
        setup_manager, mock_config_manager, mock_auth_manager, tmp_path):
    # A failed admin-account creation must surface as a setup error.
    mock_config_manager.get_identity.return_value = {}
    mock_auth_manager.create_user.return_value = False
    with patch.dict(os.environ, {'DATA_DIR': str(tmp_path)}):
        result = setup_manager.complete_setup(_valid_payload())
    assert result['success'] is False
    assert any('admin' in e.lower() or 'user' in e.lower() for e in result['errors'])
# ── get_setup_status ──────────────────────────────────────────────────────────
def test_get_setup_status_returns_complete_key(setup_manager, mock_config_manager):
    """Fresh install: status reports complete=False."""
    mock_config_manager.get_identity.return_value = {}
    report = setup_manager.get_setup_status()
    assert 'complete' in report
    assert report['complete'] is False


def test_get_setup_status_complete_reflects_true_when_done(setup_manager, mock_config_manager):
    mock_config_manager.get_identity.return_value = {'setup_complete': True}
    assert setup_manager.get_setup_status()['complete'] is True


def test_get_setup_status_contains_available_services(setup_manager, mock_config_manager):
    """Status exposes the module's service catalogue verbatim."""
    mock_config_manager.get_identity.return_value = {}
    services = setup_manager.get_setup_status().get('available_services')
    assert isinstance(services, list)
    assert services == AVAILABLE_SERVICES


def test_get_setup_status_contains_available_timezones(setup_manager, mock_config_manager):
    mock_config_manager.get_identity.return_value = {}
    zones = setup_manager.get_setup_status().get('available_timezones')
    assert isinstance(zones, list)
    assert len(zones) > 0


def test_get_setup_status_timezones_includes_utc(setup_manager, mock_config_manager):
    mock_config_manager.get_identity.return_value = {}
    assert 'UTC' in setup_manager.get_setup_status()['available_timezones']


def test_get_setup_status_timezones_match_module_constant(setup_manager, mock_config_manager):
    mock_config_manager.get_identity.return_value = {}
    assert setup_manager.get_setup_status()['available_timezones'] == AVAILABLE_TIMEZONES
+12
View File
@@ -12,6 +12,7 @@ import {
Wifi,
Server,
Key,
Package,
Package2,
Settings as SettingsIcon,
Link2,
@@ -42,6 +43,10 @@ import Login from './pages/Login';
import AccountSettings from './pages/AccountSettings';
import PeerDashboard from './pages/PeerDashboard';
import MyServices from './pages/MyServices';
import Store from './pages/Store';
import Connectivity from './pages/Connectivity';
import Setup from './pages/Setup';
import SetupGuard from './components/SetupGuard';
function PendingRestartBanner({ pending, onApply, onCancel }) {
const [confirming, setConfirming] = useState(false);
@@ -236,7 +241,9 @@ function AppCore() {
{ name: 'Routing', href: '/routing', icon: Wifi },
{ name: 'Vault', href: '/vault', icon: Key },
{ name: 'Containers', href: '/containers', icon: Package2 },
{ name: 'Store', href: '/store', icon: Package },
{ name: 'Cell Network', href: '/cell-network', icon: Link2 },
{ name: 'Connectivity', href: '/connectivity', icon: Network },
{ name: 'Logs', href: '/logs', icon: Activity },
{ name: 'Settings', href: '/settings', icon: SettingsIcon },
{ name: 'Account', href: '/account', icon: User },
@@ -264,7 +271,9 @@ function AppCore() {
return (
<Router>
<SetupGuard>
<Routes>
<Route path="/setup" element={<Setup />} />
<Route path="/login" element={<Login />} />
<Route path="*" element={
<ConfigProvider>
@@ -339,7 +348,9 @@ function AppCore() {
<Route path="/routing" element={<PrivateRoute requireRole="admin"><Routing /></PrivateRoute>} />
<Route path="/vault" element={<PrivateRoute requireRole="admin"><Vault /></PrivateRoute>} />
<Route path="/containers" element={<PrivateRoute requireRole="admin"><ContainerDashboard /></PrivateRoute>} />
<Route path="/store" element={<PrivateRoute requireRole="admin"><Store /></PrivateRoute>} />
<Route path="/cell-network" element={<PrivateRoute requireRole="admin"><CellNetwork /></PrivateRoute>} />
<Route path="/connectivity" element={<PrivateRoute requireRole="admin"><Connectivity /></PrivateRoute>} />
<Route path="/logs" element={<PrivateRoute requireRole="admin"><Logs /></PrivateRoute>} />
<Route path="/settings" element={<PrivateRoute requireRole="admin"><Settings /></PrivateRoute>} />
</Routes>
@@ -350,6 +361,7 @@ function AppCore() {
</ConfigProvider>
} />
</Routes>
</SetupGuard>
</Router>
);
}
+43
View File
@@ -0,0 +1,43 @@
import React, { useState, useEffect } from 'react';
import { Navigate, useLocation } from 'react-router-dom';
import { setupAPI } from '../services/api';
export default function SetupGuard({ children }) {
const location = useLocation();
const [status, setStatus] = useState(null); // null = loading, true = complete, false = incomplete
const [error, setError] = useState(false);
useEffect(() => {
setupAPI.getStatus()
.then(r => setStatus(r.data?.complete === true))
.catch(() => {
// If the setup endpoint doesn't exist yet, treat setup as complete
// so the rest of the app functions normally.
setStatus(true);
setError(true);
});
}, []);
// Still loading show nothing to avoid flash of wrong content
if (status === null) {
return (
<div className="min-h-screen bg-gray-950 flex items-center justify-center">
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-blue-500" />
</div>
);
}
const onSetupPage = location.pathname === '/setup';
// Setup incomplete and not already on /setup redirect there
if (status === false && !onSetupPage) {
return <Navigate to="/setup" replace />;
}
// Setup complete but user navigated to /setup send to login
if (status === true && onSetupPage) {
return <Navigate to="/login" replace />;
}
return children;
}
+693
View File
@@ -0,0 +1,693 @@
import { useState, useEffect, useCallback } from 'react';
import {
Shield,
Lock,
Globe,
RefreshCw,
CheckCircle,
AlertCircle,
ChevronDown,
Upload,
ToggleLeft,
ToggleRight,
} from 'lucide-react';
import { connectivityAPI, wireguardAPI } from '../services/api';
// Toast helpers (same pattern as Store.jsx)
// Broadcast a toast to any mounted useToasts() listener on this page.
function toastEvent(msg, type = 'success') {
  const detail = { msg, type };
  window.dispatchEvent(new CustomEvent('connectivity-toast', { detail }));
}
function Toast({ toasts }) {
return (
<div className="fixed bottom-4 right-4 z-50 space-y-2 pointer-events-none">
{toasts.map((t) => (
<div
key={t.id}
className={`px-4 py-3 rounded-lg shadow-lg text-sm text-white flex items-center gap-2 pointer-events-auto ${
t.type === 'success'
? 'bg-green-600'
: t.type === 'error'
? 'bg-red-600'
: 'bg-yellow-600'
}`}
>
{t.type === 'success' ? (
<CheckCircle className="h-4 w-4 shrink-0" />
) : (
<AlertCircle className="h-4 w-4 shrink-0" />
)}
{t.msg}
</div>
))}
</div>
);
}
// Hook: collects 'connectivity-toast' events into a list; each toast is
// auto-dismissed after 3 seconds. Returns the current toast array.
function useToasts() {
  const [toasts, setToasts] = useState([]);
  useEffect(() => {
    // Bug fix: Date.now() alone is not a unique id — two toasts raised in the
    // same millisecond got the same key (React warning) and the first timeout
    // removed both. Append a per-listener sequence number to disambiguate.
    let seq = 0;
    const handler = (e) => {
      const id = `${Date.now()}-${seq++}`;
      setToasts((prev) => [...prev, { ...e.detail, id }]);
      setTimeout(
        () => setToasts((prev) => prev.filter((t) => t.id !== id)),
        3000
      );
    };
    window.addEventListener('connectivity-toast', handler);
    // Cleanup on unmount so stale handlers don't accumulate.
    return () => window.removeEventListener('connectivity-toast', handler);
  }, []);
  return toasts;
}
// Status badge
function StatusBadge({ status }) {
if (status === 'active') {
return (
<span className="inline-flex items-center gap-1 text-xs font-medium text-green-700 bg-green-50 border border-green-200 rounded-full px-2 py-0.5">
<CheckCircle className="h-3 w-3" />
Active
</span>
);
}
if (status === 'configured') {
return (
<span className="inline-flex items-center gap-1 text-xs font-medium text-yellow-700 bg-yellow-50 border border-yellow-200 rounded-full px-2 py-0.5">
<AlertCircle className="h-3 w-3" />
Configured
</span>
);
}
if (status === 'error') {
return (
<span className="inline-flex items-center gap-1 text-xs font-medium text-red-700 bg-red-50 border border-red-200 rounded-full px-2 py-0.5">
<AlertCircle className="h-3 w-3" />
Error
</span>
);
}
// not configured
return (
<span className="inline-flex items-center gap-1 text-xs font-medium text-gray-500 bg-gray-100 border border-gray-200 rounded-full px-2 py-0.5">
Not configured
</span>
);
}
// WireGuard External card
function WireguardExitCard({ exitInfo, onUploaded }) {
const [confText, setConfText] = useState('');
const [uploading, setUploading] = useState(false);
const status = exitInfo?.status || 'not_configured';
const handleUpload = async () => {
if (!confText.trim()) return;
setUploading(true);
try {
await connectivityAPI.uploadWireguard(confText.trim());
toastEvent('WireGuard config uploaded');
setConfText('');
onUploaded();
} catch (err) {
const msg =
err.response?.data?.error ||
err.response?.data?.message ||
'Failed to upload WireGuard config';
toastEvent(msg, 'error');
} finally {
setUploading(false);
}
};
return (
<div className="bg-white rounded-lg border border-gray-200 p-6 flex flex-col gap-4">
<div className="flex items-start justify-between gap-3">
<div className="flex items-center gap-3">
<div className="flex items-center justify-center w-10 h-10 rounded-lg bg-primary-50 shrink-0">
<Shield className="h-5 w-5 text-primary-600" />
</div>
<div>
<h3 className="font-semibold text-gray-900">WireGuard External</h3>
<p className="text-sm text-gray-500">
Route traffic through an external WireGuard VPN tunnel
</p>
</div>
</div>
<StatusBadge status={status} />
</div>
<div className="flex flex-col gap-1.5">
<label
htmlFor="wg-conf"
className="text-sm font-medium text-gray-700"
>
Paste .conf file contents
</label>
<textarea
id="wg-conf"
value={confText}
onChange={(e) => setConfText(e.target.value)}
placeholder="[Interface]&#10;PrivateKey = ...&#10;&#10;[Peer]&#10;PublicKey = ..."
rows={6}
className="w-full rounded-md border border-gray-300 px-3 py-2 font-mono text-xs text-gray-800 placeholder:text-gray-400 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-primary-500 resize-y"
aria-describedby="wg-conf-hint"
/>
<p id="wg-conf-hint" className="text-xs text-gray-400">
Drag-and-drop not available paste the file text directly.
</p>
</div>
<div className="flex justify-end pt-2 border-t border-gray-100">
<button
onClick={handleUpload}
disabled={uploading || !confText.trim()}
className="flex items-center gap-1.5 px-4 py-2 text-sm font-medium text-white bg-primary-600 hover:bg-primary-700 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors"
aria-label="Upload WireGuard config"
>
{uploading ? (
<RefreshCw className="h-4 w-4 animate-spin" />
) : (
<Upload className="h-4 w-4" />
)}
{uploading ? 'Uploading…' : 'Upload Config'}
</button>
</div>
</div>
);
}
// OpenVPN card
// Card for configuring the OpenVPN exit: a named profile plus pasted .ovpn
// contents. Upload is blocked until both the profile name and config text
// are non-empty; errors from the API surface as toasts.
function OpenvpnExitCard({ exitInfo, onUploaded }) {
  const [ovpnText, setOvpnText] = useState('');
  const [profileName, setProfileName] = useState('default');
  const [uploading, setUploading] = useState(false);
  // Status comes from the parent's connectivity fetch; default when absent.
  const status = exitInfo?.status || 'not_configured';
  // Whitespace-only names count as empty.
  const nameInvalid = profileName.trim() === '';
  const handleUpload = async () => {
    if (!ovpnText.trim() || nameInvalid) return;
    setUploading(true);
    try {
      await connectivityAPI.uploadOpenvpn(ovpnText.trim(), profileName.trim());
      toastEvent('OpenVPN config uploaded');
      // Clear the config text but keep the profile name for re-uploads.
      setOvpnText('');
      onUploaded();
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        'Failed to upload OpenVPN config';
      toastEvent(msg, 'error');
    } finally {
      setUploading(false);
    }
  };
  return (
    <div className="bg-white rounded-lg border border-gray-200 p-6 flex flex-col gap-4">
      <div className="flex items-start justify-between gap-3">
        <div className="flex items-center gap-3">
          <div className="flex items-center justify-center w-10 h-10 rounded-lg bg-indigo-50 shrink-0">
            <Lock className="h-5 w-5 text-indigo-600" />
          </div>
          <div>
            <h3 className="font-semibold text-gray-900">OpenVPN</h3>
            <p className="text-sm text-gray-500">
              Route traffic through an OpenVPN tunnel
            </p>
          </div>
        </div>
        <StatusBadge status={status} />
      </div>
      {/* Profile name — required; invalid state is announced via role=alert */}
      <div className="flex flex-col gap-1.5">
        <label
          htmlFor="ovpn-name"
          className="text-sm font-medium text-gray-700"
        >
          Profile name <span className="text-red-500" aria-hidden="true">*</span>
        </label>
        <input
          id="ovpn-name"
          type="text"
          value={profileName}
          onChange={(e) => setProfileName(e.target.value)}
          placeholder="default"
          className={`w-full rounded-md border px-3 py-2 text-sm text-gray-800 placeholder:text-gray-400 focus:outline-none focus:ring-2 focus:ring-primary-500 ${
            nameInvalid
              ? 'border-red-300 focus:ring-red-400 focus:border-red-400'
              : 'border-gray-300 focus:border-primary-500'
          }`}
          aria-required="true"
          aria-describedby={nameInvalid ? 'ovpn-name-error' : undefined}
        />
        {nameInvalid && (
          <p id="ovpn-name-error" className="text-xs text-red-600" role="alert">
            Profile name is required
          </p>
        )}
      </div>
      {/* Raw .ovpn contents pasted as text */}
      <div className="flex flex-col gap-1.5">
        <label
          htmlFor="ovpn-conf"
          className="text-sm font-medium text-gray-700"
        >
          Paste .ovpn file contents
        </label>
        <textarea
          id="ovpn-conf"
          value={ovpnText}
          onChange={(e) => setOvpnText(e.target.value)}
          placeholder="client&#10;dev tun&#10;proto udp&#10;remote ..."
          rows={6}
          className="w-full rounded-md border border-gray-300 px-3 py-2 font-mono text-xs text-gray-800 placeholder:text-gray-400 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-primary-500 resize-y"
        />
      </div>
      <div className="flex justify-end pt-2 border-t border-gray-100">
        <button
          onClick={handleUpload}
          disabled={uploading || !ovpnText.trim() || nameInvalid}
          className="flex items-center gap-1.5 px-4 py-2 text-sm font-medium text-white bg-primary-600 hover:bg-primary-700 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors"
          aria-label="Upload OpenVPN config"
        >
          {uploading ? (
            <RefreshCw className="h-4 w-4 animate-spin" />
          ) : (
            <Upload className="h-4 w-4" />
          )}
          {uploading ? 'Uploading…' : 'Upload Config'}
        </button>
      </div>
    </div>
  );
}
// Tor card
function TorExitCard({ exitInfo, onToggled }) {
const [toggling, setToggling] = useState(false);
const status = exitInfo?.status || 'not_configured';
const isEnabled = status === 'active' || status === 'configured';
const handleToggle = async () => {
setToggling(true);
try {
// Tor doesn't need a config upload apply routes enables/disables it
await connectivityAPI.applyRoutes();
toastEvent(isEnabled ? 'Tor exit disabled' : 'Tor exit enabled');
onToggled();
} catch (err) {
const msg =
err.response?.data?.error ||
err.response?.data?.message ||
'Failed to toggle Tor';
toastEvent(msg, 'error');
} finally {
setToggling(false);
}
};
return (
<div className="bg-white rounded-lg border border-gray-200 p-6 flex flex-col gap-4">
<div className="flex items-start justify-between gap-3">
<div className="flex items-center gap-3">
<div className="flex items-center justify-center w-10 h-10 rounded-lg bg-purple-50 shrink-0">
<Globe className="h-5 w-5 text-purple-600" />
</div>
<div>
<h3 className="font-semibold text-gray-900">Tor</h3>
<p className="text-sm text-gray-500">
Route selected peers through the Tor anonymity network
</p>
</div>
</div>
<StatusBadge status={status} />
</div>
<p className="text-sm text-gray-500">
No configuration file required. Toggle the exit on or off peers
assigned to Tor will have their traffic routed accordingly.
</p>
<div className="flex justify-end pt-2 border-t border-gray-100">
<button
onClick={handleToggle}
disabled={toggling}
className={`flex items-center gap-2 px-4 py-2 text-sm font-medium rounded-md transition-colors disabled:opacity-50 disabled:cursor-not-allowed ${
isEnabled
? 'text-gray-700 bg-gray-100 hover:bg-gray-200'
: 'text-white bg-primary-600 hover:bg-primary-700'
}`}
aria-label={isEnabled ? 'Disable Tor exit' : 'Enable Tor exit'}
aria-pressed={isEnabled}
>
{toggling ? (
<RefreshCw className="h-4 w-4 animate-spin" />
) : isEnabled ? (
<ToggleRight className="h-4 w-4" />
) : (
<ToggleLeft className="h-4 w-4" />
)}
{toggling ? 'Applying…' : isEnabled ? 'Disable' : 'Enable'}
</button>
</div>
</div>
);
}
// Peer exit row
// Exit-tunnel choices shown in the per-peer dropdown; `value` is the
// identifier sent to the API, `label` the human-readable text.
const EXIT_OPTIONS = [
  { value: 'default', label: 'Default (direct)' },
  { value: 'wireguard', label: 'WireGuard External' },
  { value: 'openvpn', label: 'OpenVPN' },
  { value: 'tor', label: 'Tor' },
];
/**
 * Single row of the peer exit-assignment table.
 *
 * Shows the peer's current exit, a dropdown to pick a new one, and a Save
 * button that is only enabled when the selection differs from the stored
 * value.
 *
 * Props:
 *   peer        - WireGuard peer object (only `name` is used here).
 *   currentExit - the peer's current exit value; undefined means 'default'.
 *   onSaved     - callback(peerName, exitVia) invoked after a successful save.
 */
function PeerExitRow({ peer, currentExit, onSaved }) {
  const [selected, setSelected] = useState(currentExit || 'default');
  const [saving, setSaving] = useState(false);
  // Dirty check: only allow saving when the dropdown differs from the
  // currently stored assignment.
  const isDirty = selected !== (currentExit || 'default');
  const handleSave = async () => {
    setSaving(true);
    try {
      await connectivityAPI.setPeerExit(peer.name, selected);
      toastEvent(`Exit for ${peer.name} set to ${selected}`);
      onSaved(peer.name, selected);
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        `Failed to update exit for ${peer.name}`;
      toastEvent(msg, 'error');
    } finally {
      setSaving(false);
    }
  };
  return (
    <tr className="border-t border-gray-100">
      <td className="py-3 px-4 text-sm font-medium text-gray-900 truncate max-w-[180px]">
        {peer.name}
      </td>
      <td className="py-3 px-4">
        {/* Current (stored) exit, shown as its human-readable label */}
        <span className="text-sm text-gray-500">
          {EXIT_OPTIONS.find((o) => o.value === (currentExit || 'default'))
            ?.label || 'Default (direct)'}
        </span>
      </td>
      <td className="py-3 px-4">
        <div className="relative inline-block">
          <select
            value={selected}
            onChange={(e) => setSelected(e.target.value)}
            className="appearance-none bg-white border border-gray-300 text-sm text-gray-800 rounded-md pl-3 pr-8 py-1.5 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-primary-500"
            aria-label={`Change exit for ${peer.name}`}
          >
            {EXIT_OPTIONS.map((opt) => (
              <option key={opt.value} value={opt.value}>
                {opt.label}
              </option>
            ))}
          </select>
          <ChevronDown className="pointer-events-none absolute right-2 top-1/2 -translate-y-1/2 h-4 w-4 text-gray-400" />
        </div>
      </td>
      <td className="py-3 px-4 text-right">
        <button
          onClick={handleSave}
          disabled={saving || !isDirty}
          className="flex items-center gap-1.5 px-3 py-1.5 text-sm font-medium text-white bg-primary-600 hover:bg-primary-700 disabled:opacity-40 disabled:cursor-not-allowed rounded-md transition-colors ml-auto"
          aria-label={`Save exit assignment for ${peer.name}`}
        >
          {saving && <RefreshCw className="h-3.5 w-3.5 animate-spin" />}
          {saving ? 'Saving…' : 'Save'}
        </button>
      </td>
    </tr>
  );
}
// Main Connectivity component
/**
 * Top-level Connectivity page.
 *
 * Loads exit-tunnel status, per-peer exit assignments, and the WireGuard
 * peer list in parallel, then renders three areas: exit-tunnel cards, an
 * "apply routes" action, and a per-peer exit assignment table.
 */
function Connectivity() {
  const toasts = useToasts();
  const [exits, setExits] = useState({}); // keyed by exit type
  const [peerExits, setPeerExits] = useState({}); // peer_name -> exit_via
  const [peers, setPeers] = useState([]); // WireGuard peer list
  const [isLoading, setIsLoading] = useState(true);
  const [loadError, setLoadError] = useState(null);
  const [applying, setApplying] = useState(false);
  // Fetch all three data sources in parallel. Each request carries its own
  // .catch fallback so a single failing endpoint doesn't blank the page.
  const loadAll = useCallback(async () => {
    setLoadError(null);
    try {
      const [exitsRes, peerExitsRes, peersRes] = await Promise.all([
        connectivityAPI.listExits().catch(() => ({ data: {} })),
        connectivityAPI.getPeerExits().catch(() => ({ data: {} })),
        wireguardAPI.getPeers().catch(() => ({ data: { peers: [] } })),
      ]);
      const exitsData = exitsRes.data || {};
      // API may return an array or an object — normalise to an object keyed by type
      if (Array.isArray(exitsData)) {
        const map = {};
        exitsData.forEach((e) => { map[e.type] = e; });
        setExits(map);
      } else {
        setExits(exitsData);
      }
      const peerExitsData = peerExitsRes.data || {};
      // Same normalisation for peer exits: array of {name, exit_via} -> map.
      setPeerExits(
        Array.isArray(peerExitsData)
          ? Object.fromEntries(peerExitsData.map((p) => [p.name, p.exit_via]))
          : peerExitsData
      );
      const peersData = peersRes.data;
      // Peers endpoint may return a bare array or { peers: [...] }.
      const peersList = Array.isArray(peersData)
        ? peersData
        : Array.isArray(peersData?.peers)
        ? peersData.peers
        : [];
      setPeers(peersList);
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        'Could not load connectivity data. Check that the API is reachable.';
      setLoadError(msg);
    } finally {
      setIsLoading(false);
    }
  }, []);
  useEffect(() => {
    loadAll();
  }, [loadAll]);
  // Commit all pending exit-tunnel changes server-side, then refresh.
  const handleApplyRoutes = async () => {
    setApplying(true);
    try {
      await connectivityAPI.applyRoutes();
      toastEvent('Routes applied successfully');
      await loadAll();
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        'Failed to apply routes';
      toastEvent(msg, 'error');
    } finally {
      setApplying(false);
    }
  };
  // Optimistically update local state after a row saves its assignment.
  const handlePeerExitSaved = (peerName, exitVia) => {
    setPeerExits((prev) => ({ ...prev, [peerName]: exitVia }));
  };
  // Render
  return (
    <div>
      <Toast toasts={toasts} />
      {/* Page header */}
      <div className="mb-6">
        <h1 className="text-2xl font-bold text-gray-900">Connectivity</h1>
        <p className="mt-1 text-sm text-gray-500">
          Configure exit tunnels and control how each peer's traffic is routed
        </p>
      </div>
      {/* Loading skeleton */}
      {isLoading && (
        <div className="space-y-4 animate-pulse">
          <div className="h-6 bg-gray-200 rounded w-48 mb-4" />
          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
            {[1, 2, 3].map((n) => (
              <div
                key={n}
                className="bg-white rounded-lg border border-gray-200 p-6 h-48"
              >
                <div className="h-4 bg-gray-200 rounded w-1/2 mb-3" />
                <div className="h-3 bg-gray-100 rounded w-3/4 mb-2" />
                <div className="h-3 bg-gray-100 rounded w-2/3" />
              </div>
            ))}
          </div>
        </div>
      )}
      {/* Error state */}
      {!isLoading && loadError && (
        <div className="bg-white rounded-lg border border-red-200 bg-red-50 p-6">
          <div className="flex items-start gap-3">
            <AlertCircle className="h-5 w-5 text-red-500 mt-0.5 shrink-0" />
            <div className="flex-1">
              <p className="text-sm font-medium text-red-800">
                Failed to load connectivity data
              </p>
              <p className="text-sm text-red-600 mt-1">{loadError}</p>
            </div>
            <button
              onClick={() => { setIsLoading(true); loadAll(); }}
              className="btn-secondary text-sm shrink-0"
            >
              Retry
            </button>
          </div>
        </div>
      )}
      {/* Main content */}
      {!isLoading && !loadError && (
        <div className="space-y-10">
          {/* Section 1: Exit Tunnels */}
          <section>
            <div className="mb-4 flex items-center justify-between gap-4">
              <div>
                <h2 className="text-base font-semibold text-gray-900">
                  Exit Tunnels
                </h2>
                <p className="text-sm text-gray-500">
                  Upload VPN configs or enable Tor to create exit options for your
                  peers
                </p>
              </div>
            </div>
            <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
              {/* API may key the WireGuard exit under either name */}
              <WireguardExitCard
                exitInfo={exits['wireguard'] || exits['wireguard_external']}
                onUploaded={loadAll}
              />
              <OpenvpnExitCard
                exitInfo={exits['openvpn']}
                onUploaded={loadAll}
              />
              <TorExitCard
                exitInfo={exits['tor']}
                onToggled={loadAll}
              />
            </div>
            {/* Apply Routes */}
            <div className="mt-6 flex items-center justify-between gap-4 bg-gray-50 border border-gray-200 rounded-lg px-5 py-4">
              <div>
                <p className="text-sm font-medium text-gray-800">
                  Apply exit routes
                </p>
                <p className="text-xs text-gray-500 mt-0.5">
                  Commit all exit-tunnel changes to the routing table
                </p>
              </div>
              <button
                onClick={handleApplyRoutes}
                disabled={applying}
                className="flex items-center gap-2 px-4 py-2 text-sm font-medium text-white bg-primary-600 hover:bg-primary-700 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors shrink-0"
                aria-label="Apply exit routes"
              >
                <RefreshCw
                  className={`h-4 w-4 ${applying ? 'animate-spin' : ''}`}
                />
                {applying ? 'Applying…' : 'Apply Routes'}
              </button>
            </div>
          </section>
          {/* Section 2: Peer Exit Assignment */}
          <section>
            <div className="mb-4">
              <h2 className="text-base font-semibold text-gray-900">
                Peer Exit Assignment
              </h2>
              <p className="text-sm text-gray-500">
                Choose which exit tunnel each WireGuard peer uses
              </p>
            </div>
            {peers.length === 0 ? (
              <div className="bg-white rounded-lg border border-gray-200 py-12 text-center">
                <Shield className="h-10 w-10 text-gray-300 mx-auto mb-3" />
                <p className="text-sm font-medium text-gray-500">
                  No WireGuard peers found
                </p>
                <p className="text-xs text-gray-400 mt-1">
                  Add peers on the WireGuard page first, then return here to
                  assign exits.
                </p>
              </div>
            ) : (
              <div className="bg-white rounded-lg border border-gray-200 overflow-x-auto">
                <table className="min-w-full">
                  <thead>
                    <tr className="bg-gray-50 border-b border-gray-200">
                      <th className="py-3 px-4 text-left text-xs font-semibold text-gray-500 uppercase tracking-wide">
                        Peer Name
                      </th>
                      <th className="py-3 px-4 text-left text-xs font-semibold text-gray-500 uppercase tracking-wide">
                        Current Exit
                      </th>
                      <th className="py-3 px-4 text-left text-xs font-semibold text-gray-500 uppercase tracking-wide">
                        Change Exit
                      </th>
                      <th className="py-3 px-4" />
                    </tr>
                  </thead>
                  <tbody>
                    {peers.map((peer) => (
                      <PeerExitRow
                        key={peer.name}
                        peer={peer}
                        currentExit={peerExits[peer.name] || 'default'}
                        onSaved={handlePeerExitSaved}
                      />
                    ))}
                  </tbody>
                </table>
              </div>
            )}
          </section>
        </div>
      )}
    </div>
  );
}
export default Connectivity;
+707
View File
@@ -0,0 +1,707 @@
import React, { useState, useMemo } from 'react';
import { useNavigate } from 'react-router-dom';
import { Eye, EyeOff, CheckCircle, AlertCircle } from 'lucide-react';
import { setupAPI } from '../services/api';
// constants
// Total number of wizard steps shown in the progress bar.
const TOTAL_STEPS = 7;
// Cell name rule: lowercase letter first, then 1-30 of [a-z0-9-]
// (2-31 characters total).
const CELL_NAME_RE = /^[a-z][a-z0-9-]{1,30}$/;
// How the cell will be reached over the network (step 3).
const DOMAIN_OPTIONS = [
  {
    value: 'pic_ngo',
    label: 'PIC.NGO subdomain',
    description: 'Get a free yourname.pic.ngo domain — managed automatically.',
  },
  {
    value: 'custom',
    label: 'Custom domain',
    description: 'Bring your own domain. You will configure DNS records manually.',
  },
  {
    value: 'lan_only',
    label: 'LAN only',
    description: 'No public domain. Accessible only on your local network and via VPN.',
  },
];
// Dynamic-DNS providers offered in step 4 (skipped when LAN only).
const DDNS_OPTIONS = [
  { value: 'pic_ngo', label: 'pic.ngo (managed)', description: 'Automatic — no setup required.' },
  { value: 'cloudflare', label: 'Cloudflare', description: 'Use Cloudflare DNS with API token.' },
  { value: 'duckdns', label: 'DuckDNS', description: 'Free dynamic DNS via duckdns.org.' },
  { value: 'noip', label: 'No-IP', description: 'Free dynamic DNS via noip.com.' },
  { value: 'freedns', label: 'FreeDNS', description: 'Free DNS via freedns.afraid.org.' },
  { value: 'manual', label: 'Manual / None', description: 'You will handle DNS updates yourself.' },
];
// Services the user may opt in or out of (step 5).
const OPTIONAL_SERVICES = [
  { key: 'email', label: 'Email', description: 'Postfix + Dovecot IMAP/SMTP server.' },
  { key: 'calendar', label: 'Calendar & Contacts', description: 'CalDAV/CardDAV via Radicale.' },
  { key: 'files', label: 'Files (WebDAV)', description: 'WebDAV file storage accessible from any device.' },
  { key: 'webmail', label: 'Webmail UI', description: 'Browser-based email client (Roundcube).' },
];
// Core services that are always installed; rendered read-only in step 5.
const ALWAYS_ON_SERVICES = [
  { key: 'vpn', label: 'VPN (WireGuard)' },
  { key: 'dns', label: 'DNS (CoreDNS)' },
  { key: 'api', label: 'API (cell-api)' },
];
// helpers
function getAllTimezones() {
try {
return Intl.supportedValuesOf('timeZone');
} catch {
// Fallback list for older browsers
return [
'UTC',
'America/New_York',
'America/Chicago',
'America/Denver',
'America/Los_Angeles',
'Europe/London',
'Europe/Paris',
'Europe/Berlin',
'Asia/Tokyo',
'Asia/Shanghai',
'Australia/Sydney',
];
}
}
/**
 * Score a password and map it to a strength-meter descriptor.
 *
 * Scoring: one point each for length >= 12, length >= 16, an uppercase
 * letter, a digit, and a non-alphanumeric character (max 5 points).
 * Returns { label, color, width } for rendering the meter; an empty
 * password yields an empty meter.
 */
function passwordStrength(pw) {
  if (!pw) return { label: '', color: '', width: '0%' };
  // Each satisfied criterion contributes one point.
  const criteria = [
    pw.length >= 12,
    pw.length >= 16,
    /[A-Z]/.test(pw),
    /[0-9]/.test(pw),
    /[^A-Za-z0-9]/.test(pw),
  ];
  const score = criteria.reduce((total, met) => total + (met ? 1 : 0), 0);
  // Map score buckets (0-1, 2, 3, 4-5) to meter label/colour/width.
  if (score <= 1) return { label: 'Weak', color: 'bg-red-500', width: '20%' };
  if (score === 2) return { label: 'Fair', color: 'bg-yellow-500', width: '40%' };
  if (score === 3) return { label: 'Good', color: 'bg-blue-500', width: '65%' };
  return { label: 'Strong', color: 'bg-green-500', width: '100%' };
}
// sub-components
// Renders the "Step N of TOTAL_STEPS" eyebrow, the step title, and an
// optional one-line description at the top of each wizard screen.
function StepHeader({ step, title, description }) {
  return (
    <div className="mb-6">
      <p className="text-xs font-medium text-blue-400 uppercase tracking-wider mb-1">
        Step {step} of {TOTAL_STEPS}
      </p>
      <h2 className="text-lg font-semibold text-white">{title}</h2>
      {description && <p className="mt-1 text-sm text-gray-400">{description}</p>}
    </div>
  );
}
// Linear progress indicator for the wizard; percentage is derived from
// the current step number relative to TOTAL_STEPS.
function ProgressBar({ step }) {
  const pct = Math.round((step / TOTAL_STEPS) * 100);
  return (
    <div className="mb-8">
      <div className="flex justify-between text-xs text-gray-500 mb-1">
        <span>Setup progress</span>
        <span>{pct}%</span>
      </div>
      <div className="w-full bg-gray-700 rounded-full h-1.5">
        <div
          className="bg-blue-500 h-1.5 rounded-full transition-all duration-300"
          style={{ width: `${pct}%` }}
          role="progressbar"
          aria-valuenow={step}
          aria-valuemin={1}
          aria-valuemax={TOTAL_STEPS}
        />
      </div>
    </div>
  );
}
// Inline validation error shown under a form field; renders nothing when
// there is no message.
function FieldError({ message }) {
  if (!message) return null;
  return (
    <p className="mt-1.5 flex items-center gap-1 text-xs text-red-400" role="alert">
      <AlertCircle className="h-3.5 w-3.5 flex-shrink-0" />
      {message}
    </p>
  );
}
// Single radio choice rendered as a bordered card with label and optional
// description; calls onChange(value) when picked.
function RadioOption({ value, selected, label, description, onChange }) {
  return (
    <label
      className={`flex items-start gap-3 p-3 rounded-lg border cursor-pointer transition-colors ${
        selected
          ? 'border-blue-500 bg-blue-950/40'
          : 'border-gray-700 hover:border-gray-500'
      }`}
    >
      <input
        type="radio"
        className="mt-0.5 accent-blue-500"
        value={value}
        checked={selected}
        onChange={() => onChange(value)}
      />
      <div>
        <div className="text-sm font-medium text-white">{label}</div>
        {description && <div className="text-xs text-gray-400 mt-0.5">{description}</div>}
      </div>
    </label>
  );
}
// Back/Next footer for each wizard step. When onBack is omitted (step 1)
// an empty <div /> keeps the Next button right-aligned. `loading` shows a
// spinner and disables Next.
function NavButtons({ onBack, onNext, nextLabel = 'Next', nextDisabled = false, loading = false }) {
  return (
    <div className="flex justify-between mt-8 pt-6 border-t border-gray-700">
      {onBack ? (
        <button
          type="button"
          onClick={onBack}
          className="px-4 py-2 text-sm font-medium text-gray-300 bg-gray-800 hover:bg-gray-700 border border-gray-600 rounded-md transition-colors"
        >
          Back
        </button>
      ) : (
        <div />
      )}
      <button
        type="button"
        onClick={onNext}
        disabled={nextDisabled || loading}
        className="px-5 py-2 text-sm font-medium text-white bg-blue-600 hover:bg-blue-500 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors flex items-center gap-2"
      >
        {loading && (
          <span className="animate-spin rounded-full h-3.5 w-3.5 border-b-2 border-white" />
        )}
        {nextLabel}
      </button>
    </div>
  );
}
// step screens
function Step1CellName({ value, onChange, onNext }) {
const [error, setError] = useState('');
const [serverError, setServerError] = useState('');
const [loading, setLoading] = useState(false);
const validate = () => {
if (!value.trim()) return 'Cell name is required.';
if (!CELL_NAME_RE.test(value))
return 'Use lowercase letters, numbers, and hyphens only. Must start with a letter. 231 characters.';
return '';
};
const handleNext = async () => {
const err = validate();
setError(err);
setServerError('');
if (err) return;
setLoading(true);
try {
await setupAPI.validate('cell_name', { cell_name: value });
onNext();
} catch (e) {
setServerError(
e?.response?.data?.error || 'Validation failed. Please try a different name.'
);
} finally {
setLoading(false);
}
};
return (
<div>
<StepHeader
step={1}
title="Name your cell"
description="This is the internal identifier for your Personal Internet Cell. It appears in hostnames and logs."
/>
<div>
<label className="block text-sm text-gray-400 mb-1.5" htmlFor="cell-name">
Cell name <span className="text-red-400">*</span>
</label>
<input
id="cell-name"
type="text"
autoComplete="off"
spellCheck={false}
value={value}
onChange={e => {
onChange(e.target.value.toLowerCase());
setError('');
setServerError('');
}}
onKeyDown={e => e.key === 'Enter' && handleNext()}
placeholder="e.g. homelab"
className="w-full bg-gray-800 border border-gray-600 rounded px-3 py-2 text-white text-sm focus:outline-none focus:border-blue-500 placeholder-gray-600"
aria-describedby={error || serverError ? 'cell-name-error' : undefined}
/>
<p className="mt-1 text-xs text-gray-500">
Lowercase letters, numbers, hyphens. Must start with a letter. 231 characters.
</p>
<div id="cell-name-error">
<FieldError message={error || serverError} />
</div>
</div>
<NavButtons onNext={handleNext} loading={loading} nextDisabled={!value.trim()} />
</div>
);
}
/**
 * Step 2: set and confirm the admin password.
 *
 * Enforces a 12-character minimum and a matching confirmation; shows a
 * live strength meter driven by passwordStrength(). Both fields have
 * show/hide toggles.
 */
function Step2Password({ password, confirm, onChangePassword, onChangeConfirm, onNext, onBack }) {
  const [showPw, setShowPw] = useState(false);
  const [showConfirm, setShowConfirm] = useState(false);
  const [errors, setErrors] = useState({});
  const strength = passwordStrength(password);
  // Returns an {password?, confirm?} error map; empty object when valid.
  const validate = () => {
    const e = {};
    if (!password) e.password = 'Password is required.';
    else if (password.length < 12) e.password = 'Password must be at least 12 characters.';
    if (!confirm) e.confirm = 'Please confirm your password.';
    else if (password !== confirm) e.confirm = 'Passwords do not match.';
    return e;
  };
  const handleNext = () => {
    const e = validate();
    setErrors(e);
    if (Object.keys(e).length === 0) onNext();
  };
  // Mirrors validate() so the Next button disables until both rules pass.
  const isReady = password.length >= 12 && password === confirm;
  return (
    <div>
      <StepHeader
        step={2}
        title="Set admin password"
        description="This password protects access to your cell. Choose something strong and store it safely."
      />
      <div className="space-y-4">
        <div>
          <label className="block text-sm text-gray-400 mb-1.5" htmlFor="pw">
            Password <span className="text-red-400">*</span>
          </label>
          <div className="relative">
            <input
              id="pw"
              type={showPw ? 'text' : 'password'}
              autoComplete="new-password"
              value={password}
              onChange={e => { onChangePassword(e.target.value); setErrors(p => ({ ...p, password: '' })); }}
              className="w-full bg-gray-800 border border-gray-600 rounded px-3 py-2 pr-9 text-white text-sm focus:outline-none focus:border-blue-500"
              aria-describedby={errors.password ? 'pw-error' : undefined}
            />
            <button
              type="button"
              onClick={() => setShowPw(v => !v)}
              className="absolute inset-y-0 right-0 flex items-center px-2.5 text-gray-400 hover:text-gray-200"
              tabIndex={-1}
              aria-label={showPw ? 'Hide password' : 'Show password'}
            >
              {showPw ? <EyeOff className="h-4 w-4" /> : <Eye className="h-4 w-4" />}
            </button>
          </div>
          {/* Strength bar */}
          {password.length > 0 && (
            <div className="mt-2">
              <div className="w-full bg-gray-700 rounded-full h-1">
                <div
                  className={`h-1 rounded-full transition-all duration-300 ${strength.color}`}
                  style={{ width: strength.width }}
                />
              </div>
              <p className="text-xs text-gray-400 mt-1">Strength: {strength.label}</p>
            </div>
          )}
          <div id="pw-error"><FieldError message={errors.password} /></div>
        </div>
        <div>
          <label className="block text-sm text-gray-400 mb-1.5" htmlFor="pw-confirm">
            Confirm password <span className="text-red-400">*</span>
          </label>
          <div className="relative">
            <input
              id="pw-confirm"
              type={showConfirm ? 'text' : 'password'}
              autoComplete="new-password"
              value={confirm}
              onChange={e => { onChangeConfirm(e.target.value); setErrors(p => ({ ...p, confirm: '' })); }}
              onKeyDown={e => e.key === 'Enter' && handleNext()}
              className="w-full bg-gray-800 border border-gray-600 rounded px-3 py-2 pr-9 text-white text-sm focus:outline-none focus:border-blue-500"
              aria-describedby={errors.confirm ? 'pw-confirm-error' : undefined}
            />
            <button
              type="button"
              onClick={() => setShowConfirm(v => !v)}
              className="absolute inset-y-0 right-0 flex items-center px-2.5 text-gray-400 hover:text-gray-200"
              tabIndex={-1}
              aria-label={showConfirm ? 'Hide password' : 'Show password'}
            >
              {showConfirm ? <EyeOff className="h-4 w-4" /> : <Eye className="h-4 w-4" />}
            </button>
          </div>
          <div id="pw-confirm-error"><FieldError message={errors.confirm} /></div>
        </div>
      </div>
      <NavButtons onBack={onBack} onNext={handleNext} nextDisabled={!isReady} />
    </div>
  );
}
// Step 3: pick the domain strategy (pic.ngo subdomain / custom / LAN only)
// from DOMAIN_OPTIONS via radio cards.
function Step3Domain({ value, onChange, onNext, onBack }) {
  return (
    <div>
      <StepHeader
        step={3}
        title="Choose your domain"
        description="How will you and your peers reach this cell over the internet?"
      />
      <div className="space-y-2">
        {DOMAIN_OPTIONS.map(opt => (
          <RadioOption
            key={opt.value}
            value={opt.value}
            label={opt.label}
            description={opt.description}
            selected={value === opt.value}
            onChange={onChange}
          />
        ))}
      </div>
      <NavButtons onBack={onBack} onNext={onNext} nextDisabled={!value} />
    </div>
  );
}
// Step 4: pick a dynamic-DNS provider from DDNS_OPTIONS. This step is
// skipped entirely by the parent when the domain type is LAN only.
function Step4DDNS({ value, onChange, onNext, onBack }) {
  return (
    <div>
      <StepHeader
        step={4}
        title="DDNS provider"
        description="Which provider will keep your dynamic IP address up to date? Credentials are configured separately after setup."
      />
      <div className="space-y-2">
        {DDNS_OPTIONS.map(opt => (
          <RadioOption
            key={opt.value}
            value={opt.value}
            label={opt.label}
            description={opt.description}
            selected={value === opt.value}
            onChange={onChange}
          />
        ))}
      </div>
      <NavButtons onBack={onBack} onNext={onNext} nextDisabled={!value} />
    </div>
  );
}
/**
 * Step 5: choose optional services.
 *
 * `selected` is an array of OPTIONAL_SERVICES keys; toggling adds/removes
 * a key immutably via onChange. ALWAYS_ON_SERVICES are rendered as
 * disabled, permanently-checked rows for information only.
 */
function Step5Services({ selected, onChange, onNext, onBack }) {
  // Immutably add or remove a service key from the selection.
  const toggle = key => {
    onChange(
      selected.includes(key) ? selected.filter(k => k !== key) : [...selected, key]
    );
  };
  return (
    <div>
      <StepHeader
        step={5}
        title="Optional services"
        description="Choose which services to enable. You can change this later in Settings."
      />
      {/* Optional services */}
      <div className="space-y-2 mb-6">
        {OPTIONAL_SERVICES.map(svc => {
          const checked = selected.includes(svc.key);
          return (
            <label
              key={svc.key}
              className={`flex items-start gap-3 p-3 rounded-lg border cursor-pointer transition-colors ${
                checked ? 'border-blue-500 bg-blue-950/40' : 'border-gray-700 hover:border-gray-500'
              }`}
            >
              <input
                type="checkbox"
                className="mt-0.5 accent-blue-500"
                checked={checked}
                onChange={() => toggle(svc.key)}
              />
              <div>
                <div className="text-sm font-medium text-white">{svc.label}</div>
                <div className="text-xs text-gray-400 mt-0.5">{svc.description}</div>
              </div>
            </label>
          );
        })}
      </div>
      {/* Always-on services */}
      <div>
        <p className="text-xs font-medium text-gray-500 uppercase tracking-wider mb-2">
          Always enabled
        </p>
        <div className="space-y-1.5">
          {ALWAYS_ON_SERVICES.map(svc => (
            <div
              key={svc.key}
              className="flex items-center gap-3 p-3 rounded-lg border border-gray-800 bg-gray-900/40 opacity-60"
            >
              <input type="checkbox" checked readOnly disabled className="mt-0 accent-blue-500" aria-label={`${svc.label} is always enabled`} />
              <span className="text-sm text-gray-400">{svc.label}</span>
              <span className="ml-auto text-xs text-gray-600">always enabled</span>
            </div>
          ))}
        </div>
      </div>
      <NavButtons onBack={onBack} onNext={onNext} />
    </div>
  );
}
/**
 * Step 6: pick a timezone.
 *
 * The full zone list comes from getAllTimezones() (memoised once); a
 * search box filters it case-insensitively and the result is shown in a
 * multi-row <select>.
 */
function Step6Timezone({ value, onChange, onNext, onBack }) {
  const [query, setQuery] = useState('');
  const allZones = useMemo(() => getAllTimezones(), []);
  // Case-insensitive substring filter; recomputed only when query changes.
  const filtered = useMemo(() => {
    const q = query.toLowerCase();
    return q ? allZones.filter(z => z.toLowerCase().includes(q)) : allZones;
  }, [query, allZones]);
  return (
    <div>
      <StepHeader
        step={6}
        title="Timezone"
        description="Used for log timestamps, cron jobs, and email headers."
      />
      <div>
        <label className="block text-sm text-gray-400 mb-1.5" htmlFor="tz-search">
          Search timezone
        </label>
        <input
          id="tz-search"
          type="text"
          value={query}
          onChange={e => setQuery(e.target.value)}
          placeholder="e.g. New York, Berlin, Tokyo"
          className="w-full bg-gray-800 border border-gray-600 rounded px-3 py-2 text-white text-sm focus:outline-none focus:border-blue-500 placeholder-gray-600 mb-2"
        />
        <label className="block text-sm text-gray-400 mb-1.5" htmlFor="tz-select">
          Select timezone <span className="text-red-400">*</span>
        </label>
        <select
          id="tz-select"
          value={value}
          onChange={e => onChange(e.target.value)}
          size={8}
          className="w-full bg-gray-800 border border-gray-600 rounded px-3 py-2 text-white text-sm focus:outline-none focus:border-blue-500"
        >
          {filtered.map(z => (
            <option key={z} value={z}>{z}</option>
          ))}
        </select>
        {value && (
          <p className="mt-2 text-xs text-gray-400">
            Selected: <span className="text-white">{value}</span>
          </p>
        )}
      </div>
      <NavButtons onBack={onBack} onNext={onNext} nextDisabled={!value} />
    </div>
  );
}
// One label/value line in the step-7 review summary.
function ReviewRow({ label, value }) {
  return (
    <div className="flex justify-between py-2.5 border-b border-gray-800 last:border-0">
      <span className="text-sm text-gray-400">{label}</span>
      <span className="text-sm text-white font-medium text-right max-w-[60%] break-words">{value}</span>
    </div>
  );
}
/**
 * Step 7: read-only review of all choices plus the final submit button.
 *
 * Raw field values are mapped back to their human-readable labels; the
 * DDNS row is hidden for LAN-only setups, and the password is masked.
 * `submitError` from the parent is shown inline above the buttons.
 */
function Step7Review({ fields, onBack, onSubmit, submitting, submitError }) {
  const domainLabel = DOMAIN_OPTIONS.find(o => o.value === fields.domain_type)?.label || fields.domain_type;
  const ddnsLabel = DDNS_OPTIONS.find(o => o.value === fields.ddns_provider)?.label || fields.ddns_provider;
  const serviceLabels = fields.services.length
    ? fields.services.map(k => OPTIONAL_SERVICES.find(s => s.key === k)?.label || k).join(', ')
    : 'None selected';
  return (
    <div>
      <StepHeader
        step={7}
        title="Review and finish"
        description="Check your choices below. You can go back to change anything before completing setup."
      />
      <div className="bg-gray-800/50 border border-gray-700 rounded-lg px-4 py-1 mb-2">
        <ReviewRow label="Cell name" value={fields.cell_name} />
        <ReviewRow label="Admin password" value="••••••••••••" />
        <ReviewRow label="Domain type" value={domainLabel} />
        {fields.domain_type !== 'lan_only' && (
          <ReviewRow label="DDNS provider" value={ddnsLabel} />
        )}
        <ReviewRow label="Optional services" value={serviceLabels} />
        <ReviewRow label="Timezone" value={fields.timezone} />
      </div>
      {submitError && (
        <div className="mt-4 p-3 bg-red-950/50 border border-red-700 rounded-lg flex items-start gap-2">
          <AlertCircle className="h-4 w-4 text-red-400 flex-shrink-0 mt-0.5" />
          <p className="text-sm text-red-300">{submitError}</p>
        </div>
      )}
      <NavButtons
        onBack={onBack}
        onNext={onSubmit}
        nextLabel="Complete setup"
        loading={submitting}
      />
    </div>
  );
}
// main component
/**
 * First-time setup wizard.
 *
 * Owns all form state, orchestrates step navigation (including skipping
 * the DDNS step for LAN-only setups), submits the collected payload via
 * setupAPI.complete(), and redirects to /login on success.
 */
export default function Setup() {
  const navigate = useNavigate();
  const [step, setStep] = useState(1);
  const [done, setDone] = useState(false);
  // Form state
  const [cellName, setCellName] = useState('');
  const [password, setPassword] = useState('');
  const [passwordConfirm, setPasswordConfirm] = useState('');
  const [domainType, setDomainType] = useState('pic_ngo');
  const [ddnsProvider, setDdnsProvider] = useState('pic_ngo');
  const [services, setServices] = useState(['email', 'calendar', 'files', 'webmail']);
  // Default to the browser's own timezone, falling back to UTC.
  const [timezone, setTimezone] = useState(
    (() => { try { return Intl.DateTimeFormat().resolvedOptions().timeZone || 'UTC'; } catch { return 'UTC'; } })()
  );
  // Submit state
  const [submitting, setSubmitting] = useState(false);
  const [submitError, setSubmitError] = useState('');
  // LAN-only cells need no dynamic DNS, so step 4 is bypassed.
  const skipDdns = domainType === 'lan_only';
  const goNext = () => setStep(s => Math.min(s + 1, TOTAL_STEPS));
  const goBack = () => setStep(s => Math.max(s - 1, 1));
  // Skip step 4 when LAN only
  const handleStep3Next = () => {
    if (skipDdns) setStep(5);
    else setStep(4);
  };
  const handleStep4Back = () => setStep(3);
  const handleStep5Back = () => {
    if (skipDdns) setStep(3);
    else setStep(4);
  };
  const handleSubmit = async () => {
    setSubmitError('');
    setSubmitting(true);
    // ddns_provider is omitted entirely for LAN-only setups.
    const payload = {
      cell_name: cellName,
      password,
      domain_type: domainType,
      ...(skipDdns ? {} : { ddns_provider: ddnsProvider }),
      services,
      timezone,
    };
    try {
      await setupAPI.complete(payload);
      setDone(true);
      // Brief success screen, then hand off to the login page.
      setTimeout(() => navigate('/login', { replace: true }), 2000);
    } catch (e) {
      setSubmitError(
        e?.response?.data?.error ||
        'Setup could not be completed. Please check your entries and try again.'
      );
    } finally {
      setSubmitting(false);
    }
  };
  // Snapshot of form values passed to the review step (password excluded).
  const allFields = { cell_name: cellName, domain_type: domainType, ddns_provider: ddnsProvider, services, timezone };
  if (done) {
    return (
      <div className="flex items-center justify-center min-h-screen bg-gray-950">
        <div className="text-center">
          <CheckCircle className="h-12 w-12 text-green-400 mx-auto mb-4" />
          <h2 className="text-lg font-semibold text-white mb-2">Setup complete!</h2>
          <p className="text-sm text-gray-400">Redirecting to login...</p>
        </div>
      </div>
    );
  }
  return (
    <div className="flex items-center justify-center min-h-screen bg-gray-950 px-4 py-10">
      <div className="w-full max-w-lg bg-gray-900 border border-gray-700 rounded-xl p-8 shadow-2xl">
        {/* Page title */}
        <div className="mb-6">
          <h1 className="text-xl font-bold text-white">Personal Internet Cell</h1>
          <p className="text-sm text-gray-400 mt-0.5">First-time setup</p>
        </div>
        <ProgressBar step={step} />
        {step === 1 && (
          <Step1CellName value={cellName} onChange={setCellName} onNext={goNext} />
        )}
        {step === 2 && (
          <Step2Password
            password={password}
            confirm={passwordConfirm}
            onChangePassword={setPassword}
            onChangeConfirm={setPasswordConfirm}
            onNext={goNext}
            onBack={goBack}
          />
        )}
        {step === 3 && (
          <Step3Domain value={domainType} onChange={setDomainType} onNext={handleStep3Next} onBack={goBack} />
        )}
        {step === 4 && (
          <Step4DDNS value={ddnsProvider} onChange={setDdnsProvider} onNext={goNext} onBack={handleStep4Back} />
        )}
        {step === 5 && (
          <Step5Services selected={services} onChange={setServices} onNext={goNext} onBack={handleStep5Back} />
        )}
        {step === 6 && (
          <Step6Timezone value={timezone} onChange={setTimezone} onNext={goNext} onBack={goBack} />
        )}
        {step === 7 && (
          <Step7Review
            fields={allFields}
            onBack={goBack}
            onSubmit={handleSubmit}
            submitting={submitting}
            submitError={submitError}
          />
        )}
      </div>
    </div>
  );
}
+429
View File
@@ -0,0 +1,429 @@
import { useState, useEffect, useCallback } from 'react';
import {
Package,
Download,
Trash2,
RefreshCw,
CheckCircle,
AlertCircle,
} from 'lucide-react';
import { storeAPI } from '../services/api';
// Toast helpers (same pattern as Settings.jsx)
// Fire-and-forget toast: dispatches a window-level 'store-toast' event
// that useToasts() listens for. `type` is 'success' (default) or 'error'.
function toastEvent(msg, type = 'success') {
  window.dispatchEvent(new CustomEvent('store-toast', { detail: { msg, type } }));
}
// Renders the stack of active toasts in the bottom-right corner. Colour
// and icon are chosen from each toast's `type` ('success'/'error'/other).
function Toast({ toasts }) {
  return (
    <div className="fixed bottom-4 right-4 z-50 space-y-2 pointer-events-none">
      {toasts.map((t) => (
        <div
          key={t.id}
          className={`px-4 py-3 rounded-lg shadow-lg text-sm text-white flex items-center gap-2 pointer-events-auto ${
            t.type === 'success' ? 'bg-green-600' : t.type === 'error' ? 'bg-red-600' : 'bg-yellow-600'
          }`}
        >
          {t.type === 'success' ? (
            <CheckCircle className="h-4 w-4 shrink-0" />
          ) : (
            <AlertCircle className="h-4 w-4 shrink-0" />
          )}
          {t.msg}
        </div>
      ))}
    </div>
  );
}
// Hook that collects 'store-toast' window events into a list of toasts,
// auto-dismissing each after 4 seconds.
// NOTE(review): Date.now() ids can collide if two toasts fire in the same
// millisecond (duplicate React keys, joint removal) — confirm acceptable.
function useToasts() {
  const [toasts, setToasts] = useState([]);
  useEffect(() => {
    const handler = (e) => {
      const id = Date.now();
      setToasts((prev) => [...prev, { ...e.detail, id }]);
      setTimeout(() => setToasts((prev) => prev.filter((t) => t.id !== id)), 4000);
    };
    window.addEventListener('store-toast', handler);
    // Detach the listener on unmount to avoid leaks.
    return () => window.removeEventListener('store-toast', handler);
  }, []);
  return toasts;
}
// Skeleton card
// Grey pulsing placeholder shown while the service catalogue loads.
function SkeletonCard() {
  return (
    <div className="card animate-pulse">
      <div className="h-4 bg-gray-200 rounded w-1/2 mb-2" />
      <div className="h-3 bg-gray-100 rounded w-3/4 mb-1" />
      <div className="h-3 bg-gray-100 rounded w-1/2 mb-4" />
      <div className="flex justify-between items-center mt-auto">
        <div className="h-3 bg-gray-100 rounded w-1/4" />
        <div className="h-8 bg-gray-200 rounded w-20" />
      </div>
    </div>
  );
}
// Confirm remove dialog
/**
 * Modal confirming removal of an installed service.
 *
 * Offers an opt-in "also delete data" checkbox; its state is passed to
 * onConfirm(purge) so the caller can decide whether to purge volumes.
 *
 * Props:
 *   service   - service object (only `name` is used here).
 *   onConfirm - callback(purge: boolean) when removal is confirmed.
 *   onCancel  - callback when the dialog is dismissed.
 */
function ConfirmRemoveDialog({ service, onConfirm, onCancel }) {
  const [purge, setPurge] = useState(false);
  return (
    <div className="fixed inset-0 bg-black/40 flex items-center justify-center z-50">
      <div className="bg-white rounded-xl shadow-xl p-6 w-96 mx-4">
        <div className="flex items-start gap-3 mb-4">
          <AlertCircle className="h-5 w-5 text-red-500 mt-0.5 shrink-0" />
          <div>
            <h3 className="font-semibold text-gray-900">Remove {service.name}?</h3>
            <p className="text-sm text-gray-500 mt-1">
              The service will be stopped and uninstalled. By default, data is kept on disk.
            </p>
          </div>
        </div>
        <label className="flex items-center gap-2 cursor-pointer select-none mb-5">
          <input
            type="checkbox"
            checked={purge}
            onChange={(e) => setPurge(e.target.checked)}
            className="h-4 w-4 rounded border-gray-300 text-red-600 focus:ring-red-400"
          />
          <span className="text-sm text-gray-700">
            Also delete service data (cannot be undone)
          </span>
        </label>
        <div className="flex gap-2 justify-end">
          <button
            onClick={onCancel}
            className="btn-secondary text-sm"
          >
            Cancel
          </button>
          <button
            onClick={() => onConfirm(purge)}
            className="px-4 py-2 text-sm font-medium text-white bg-red-600 hover:bg-red-700 rounded-md transition-colors"
          >
            {purge ? 'Remove and Delete Data' : 'Remove Service'}
          </button>
        </div>
      </div>
    </div>
  );
}
// Service card
function ServiceCard({ service, isInstalled, installedInfo, onInstall, onRemove, installing, removing }) {
return (
<div className="card flex flex-col gap-3">
{/* Header row */}
<div className="flex items-start justify-between gap-2">
<div className="flex items-center gap-2 min-w-0">
<Package className="h-5 w-5 text-primary-500 shrink-0" />
<span className="font-semibold text-gray-900 truncate">{service.name}</span>
</div>
{isInstalled && (
<span className="flex items-center gap-1 text-xs font-medium text-green-700 bg-green-50 border border-green-200 rounded-full px-2 py-0.5 shrink-0">
<CheckCircle className="h-3 w-3" />
Installed
</span>
)}
</div>
{/* Description */}
<p className="text-sm text-gray-500 flex-1">
{service.description || 'No description available.'}
</p>
{/* Meta row */}
<div className="flex flex-wrap gap-x-4 gap-y-1 text-xs text-gray-400">
{service.version && (
<span>v{service.version}</span>
)}
{service.author && (
<span>by {service.author}</span>
)}
{isInstalled && installedInfo?.installed_at && (
<span>Installed {new Date(installedInfo.installed_at).toLocaleDateString()}</span>
)}
</div>
{/* Action */}
<div className="flex justify-end pt-1 border-t border-gray-100">
{isInstalled ? (
<button
onClick={() => onRemove(service)}
disabled={removing}
className="flex items-center gap-1.5 px-3 py-1.5 text-sm font-medium text-white bg-red-600 hover:bg-red-700 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors"
aria-label={`Remove ${service.name}`}
>
{removing ? (
<RefreshCw className="h-4 w-4 animate-spin" />
) : (
<Trash2 className="h-4 w-4" />
)}
{removing ? 'Removing…' : 'Remove'}
</button>
) : (
<button
onClick={() => onInstall(service)}
disabled={installing}
className="flex items-center gap-1.5 px-3 py-1.5 text-sm font-medium text-white bg-primary-600 hover:bg-primary-700 disabled:opacity-50 disabled:cursor-not-allowed rounded-md transition-colors"
aria-label={`Install ${service.name}`}
>
{installing ? (
<RefreshCw className="h-4 w-4 animate-spin" />
) : (
<Download className="h-4 w-4" />
)}
{installing ? 'Installing…' : 'Install'}
</button>
)}
</div>
</div>
);
}
// Main Store component
// Page that lists available and installed services from the service store,
// and lets the user install, remove (optionally purging data), and refresh
// the store index. All user feedback goes through 'store-toast' events.
function Store() {
  const toasts = useToasts();
  const [services, setServices] = useState([]); // available services array
  const [installed, setInstalled] = useState({}); // map of id -> installed info
  const [isLoading, setIsLoading] = useState(true);
  const [loadError, setLoadError] = useState(null);
  const [refreshing, setRefreshing] = useState(false);
  // Per-service operation state: { [id]: 'installing' | 'removing' | null }
  const [opState, setOpState] = useState({});
  // Pending remove confirmation dialog
  const [removeTarget, setRemoveTarget] = useState(null); // service object or null
  // Fetches the store index and installed map. Clears any prior load error;
  // always clears isLoading so refresh/retry callers don't flash the skeleton.
  // Assumes the API returns { available: [...], installed: {...} } — falls
  // back to empty collections on any other shape.
  const loadStore = useCallback(async () => {
    setLoadError(null);
    try {
      const res = await storeAPI.listServices();
      const data = res.data || {};
      setServices(Array.isArray(data.available) ? data.available : []);
      setInstalled(data.installed && typeof data.installed === 'object' ? data.installed : {});
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        'Could not load the service store. Check that the API is reachable.';
      setLoadError(msg);
    } finally {
      setIsLoading(false);
    }
  }, []);
  // Initial load on mount (loadStore identity is stable via useCallback).
  useEffect(() => {
    loadStore();
  }, [loadStore]);
  // Asks the backend to refresh its store index, then reloads the lists.
  const handleRefresh = async () => {
    setRefreshing(true);
    try {
      await storeAPI.refreshIndex();
      toastEvent('Store index refreshed');
      await loadStore();
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        'Failed to refresh store index';
      toastEvent(msg, 'error');
    } finally {
      setRefreshing(false);
    }
  };
  // Installs a service; opState drives the per-card spinner/disabled state.
  const handleInstall = async (service) => {
    setOpState((s) => ({ ...s, [service.id]: 'installing' }));
    try {
      await storeAPI.installService(service.id);
      toastEvent(`${service.name} installed successfully`);
      await loadStore();
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        `Failed to install ${service.name}`;
      toastEvent(msg, 'error');
    } finally {
      setOpState((s) => ({ ...s, [service.id]: null }));
    }
  };
  // Removal is a two-step flow: clicking "Remove" only opens the dialog.
  const handleRemoveClick = (service) => {
    setRemoveTarget(service);
  };
  // Dialog confirmation: capture the target before closing the dialog,
  // then perform the removal (purge = also delete on-disk data).
  const handleRemoveConfirm = async (purge) => {
    const service = removeTarget;
    setRemoveTarget(null);
    setOpState((s) => ({ ...s, [service.id]: 'removing' }));
    try {
      await storeAPI.removeService(service.id, purge);
      toastEvent(`${service.name} removed`);
      await loadStore();
    } catch (err) {
      const msg =
        err.response?.data?.error ||
        err.response?.data?.message ||
        `Failed to remove ${service.name}`;
      toastEvent(msg, 'error');
    } finally {
      setOpState((s) => ({ ...s, [service.id]: null }));
    }
  };
  // Render
  // Partition the catalog by install state. NOTE(review): an installed
  // service absent from `services` won't appear in either list — presumably
  // the API keeps installed services in `available`; verify against backend.
  const installedServices = services.filter((s) => installed[s.id]);
  const availableServices = services.filter((s) => !installed[s.id]);
  return (
    <div>
      <Toast toasts={toasts} />
      {/* Page header */}
      <div className="mb-6 flex items-start justify-between gap-4">
        <div>
          <h1 className="text-2xl font-bold text-gray-900">Service Store</h1>
          <p className="mt-1 text-sm text-gray-500">
            Browse and install optional services for your Personal Internet Cell
          </p>
        </div>
        <button
          onClick={handleRefresh}
          disabled={refreshing || isLoading}
          className="btn-secondary flex items-center gap-2 text-sm shrink-0"
          aria-label="Refresh store index"
        >
          <RefreshCw className={`h-4 w-4 ${refreshing ? 'animate-spin' : ''}`} />
          {refreshing ? 'Refreshing…' : 'Refresh Store'}
        </button>
      </div>
      {/* Loading state */}
      {isLoading && (
        <div>
          <div className="h-4 bg-gray-200 rounded w-40 mb-4 animate-pulse" />
          <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
            {[1, 2, 3, 4, 5, 6].map((n) => (
              <SkeletonCard key={n} />
            ))}
          </div>
        </div>
      )}
      {/* Error state */}
      {!isLoading && loadError && (
        <div className="card border border-red-200 bg-red-50">
          <div className="flex items-start gap-3">
            <AlertCircle className="h-5 w-5 text-red-500 mt-0.5 shrink-0" />
            <div className="flex-1">
              <p className="text-sm font-medium text-red-800">Failed to load store</p>
              <p className="text-sm text-red-600 mt-1">{loadError}</p>
            </div>
            <button
              onClick={() => { setIsLoading(true); loadStore(); }}
              className="btn-secondary text-sm shrink-0"
            >
              Retry
            </button>
          </div>
        </div>
      )}
      {/* Content */}
      {!isLoading && !loadError && (
        <>
          {/* Installed services section */}
          {installedServices.length > 0 && (
            <section className="mb-8">
              <h2 className="text-sm font-semibold text-gray-500 uppercase tracking-wide mb-3">
                Installed ({installedServices.length})
              </h2>
              <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
                {installedServices.map((svc) => (
                  <ServiceCard
                    key={svc.id}
                    service={svc}
                    isInstalled={true}
                    installedInfo={installed[svc.id]}
                    onInstall={handleInstall}
                    onRemove={handleRemoveClick}
                    installing={opState[svc.id] === 'installing'}
                    removing={opState[svc.id] === 'removing'}
                  />
                ))}
              </div>
            </section>
          )}
          {/* Available services section */}
          <section>
            <h2 className="text-sm font-semibold text-gray-500 uppercase tracking-wide mb-3">
              {installedServices.length > 0 ? 'Available to Install' : 'Available Services'}
              {availableServices.length > 0 && ` (${availableServices.length})`}
            </h2>
            {availableServices.length === 0 && installedServices.length === 0 && (
              <div className="card border border-gray-100 text-center py-12">
                <Package className="h-10 w-10 text-gray-300 mx-auto mb-3" />
                <p className="text-sm font-medium text-gray-500">No services in the store yet</p>
                <p className="text-xs text-gray-400 mt-1">
                  Click "Refresh Store" to check for available services.
                </p>
              </div>
            )}
            {availableServices.length === 0 && installedServices.length > 0 && (
              <div className="card border border-gray-100 text-center py-8">
                <CheckCircle className="h-8 w-8 text-green-400 mx-auto mb-2" />
                <p className="text-sm text-gray-500">All available services are installed.</p>
              </div>
            )}
            {availableServices.length > 0 && (
              <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
                {availableServices.map((svc) => (
                  <ServiceCard
                    key={svc.id}
                    service={svc}
                    isInstalled={false}
                    installedInfo={null}
                    onInstall={handleInstall}
                    onRemove={handleRemoveClick}
                    installing={opState[svc.id] === 'installing'}
                    removing={opState[svc.id] === 'removing'}
                  />
                ))}
              </div>
            )}
          </section>
        </>
      )}
      {/* Remove confirmation dialog */}
      {removeTarget && (
        <ConfirmRemoveDialog
          service={removeTarget}
          onConfirm={handleRemoveConfirm}
          onCancel={() => setRemoveTarget(null)}
        />
      )}
    </div>
  );
}
+28
View File
@@ -288,6 +288,16 @@ export const cellLinkAPI = {
getServices: () => api.get('/api/cells/services'),
};
// Service Store API
// Thin wrappers around the /api/store endpoints.
export const storeAPI = {
  listServices() {
    return api.get('/api/store/services');
  },
  getManifest(id) {
    return api.get(`/api/store/services/${id}/manifest`);
  },
  installService(id) {
    return api.post(`/api/store/services/${id}/install`);
  },
  removeService(id, purge = false) {
    return api.delete(`/api/store/services/${id}`, { params: { purge } });
  },
  listInstalled() {
    return api.get('/api/store/installed');
  },
  refreshIndex() {
    return api.post('/api/store/refresh');
  },
};
// Health check
export const healthAPI = {
check: () => api.get('/health'),
@@ -311,6 +321,24 @@ export const logsAPI = {
setVerbosity: (levels) => api.put('/api/logs/verbosity', levels),
};
// Setup Wizard API
// Thin wrappers around the /api/setup endpoints.
export const setupAPI = {
  getStatus() {
    return api.get('/api/setup/status');
  },
  validate(step, data) {
    return api.post('/api/setup/validate', { step, data });
  },
  complete(payload) {
    return api.post('/api/setup/complete', payload);
  },
};
// Connectivity / Exit Routing API
// Thin wrappers around the /api/connectivity endpoints.
export const connectivityAPI = {
  getStatus() {
    return api.get('/api/connectivity/status');
  },
  listExits() {
    return api.get('/api/connectivity/exits');
  },
  uploadWireguard(conf_text) {
    return api.post('/api/connectivity/exits/wireguard', { conf_text });
  },
  uploadOpenvpn(ovpn_text, name = 'default') {
    return api.post('/api/connectivity/exits/openvpn', { ovpn_text, name });
  },
  applyRoutes() {
    return api.post('/api/connectivity/exits/apply');
  },
  getPeerExits() {
    return api.get('/api/connectivity/peers');
  },
  setPeerExit(peer_name, exit_via) {
    return api.put(`/api/connectivity/peers/${peer_name}/exit`, { exit_via });
  },
};
// Container Management API
export const containerAPI = {
// Containers