diff --git a/Makefile b/Makefile index f9f34d6..49485a6 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ backup restore \ test test-all test-unit test-coverage test-api test-cli \ test-phase1 test-phase2 test-phase3 test-phase4 test-all-phases \ + test-integration test-integration-readonly \ show-routes add-peer list-peers # Detect docker compose command (v2 plugin preferred, fallback to v1 standalone) @@ -54,7 +55,9 @@ help: @echo "" @echo "Tests:" @echo " test - Run all tests" - @echo " test-coverage - Run tests with HTML coverage report" + @echo " test-coverage - Run tests with HTML coverage report" + @echo " test-integration - Full integration tests (needs running stack)" + @echo " test-integration-readonly - Read-only integration tests (safe to run anytime)" @echo "" @echo "Peers:" @echo " list-peers - List configured WireGuard peers" @@ -225,7 +228,15 @@ test-unit: pytest tests/ test-coverage: - pytest tests/ api/tests/ --cov=api --cov-report=html --cov-report=term-missing -v + pytest tests/ api/tests/ --cov=api --cov-report=html --cov-report=term-missing --cov-fail-under=70 -v + +test-integration: + @echo "Running full integration tests (requires running PIC stack)..." + PIC_HOST=$${PIC_HOST:-localhost} pytest tests/integration/ -v + +test-integration-readonly: + @echo "Running read-only integration tests (no peer creation)..." + PIC_HOST=$${PIC_HOST:-localhost} pytest tests/integration/test_live_api.py tests/integration/test_webui.py -v test-api: cd api && python3 -m pytest tests/test_api_endpoints.py -v diff --git a/README.md b/README.md index d554176..0063e7f 100644 --- a/README.md +++ b/README.md @@ -1,539 +1,267 @@ - -# Personal Internet Cell - -## ๐ŸŒŸ Overview - -The Personal Internet Cell is a **production-grade, self-hosted, decentralized digital infrastructure** that empowers you to: - -- **Host your own services**: Email, calendar, contacts, files, DNS, DHCP, NTP -- **Secure mesh networking**: Connect with trusted peers via WireGuard VPN -- **Advanced routing**: VPN gateway, NAT, firewall, exit nodes, and bridge routing -- **Enterprise security**: Self-hosted CA, certificate management, trust systems -- **Modern management**: RESTful API, enhanced CLI, and comprehensive monitoring -- **Event-driven architecture**: Service orchestration and real-time communication - ---- - -## ๐Ÿš€ Key Features - -### ๐Ÿ”ง **Core Services** -- **Network Services**: DNS, DHCP, NTP with dynamic management -- **VPN & Mesh**: WireGuard-based peer federation with dynamic IP updates -- **Digital Services**: Email (SMTP/IMAP), Calendar/Contacts (CalDAV/CardDAV), File Storage (WebDAV) -- **Security**: Self-hosted Certificate Authority, Age/Fernet encryption, trust management -- **Container Orchestration**: Docker-based service management and deployment - -### ๐Ÿ—๏ธ **Architecture Highlights** -- **BaseServiceManager**: Unified interface across all 10 service managers -- **Event-Driven Service Bus**: Real-time service communication and orchestration -- **Centralized Configuration**: Type-safe validation, backup/restore, import/export -- **Comprehensive Logging**: Structured JSON logs with rotation, search, and export -- **Enhanced CLI**: Interactive mode, batch operations, service wizards -- **Health Monitoring**: Real-time health checks and performance metrics - -### ๐Ÿ“Š **Production Features** -- **Service Orchestration**: Automatic service dependency management -- **Configuration Management**: Schema validation, versioning, and migration -- **Error Handling**: Standardized error handling and recovery mechanisms -- **Testing**: Comprehensive test suite with 77%+ coverage -- **Documentation**: Complete API documentation and usage guides - ---- - -## ๐Ÿ“‹ Table of Contents - -1. [Quick Start](#quick-start) -2. [Architecture](#architecture) -3. [Service Managers](#service-managers) -4. [API Reference](#api-reference) -5. [CLI Guide](#cli-guide) -6. [Configuration](#configuration) -7. [Security](#security) -8. [Development](#development) -9. [Testing](#testing) -10. [Deployment](#deployment) -11. [Contributing](#contributing) -12. [License](#license) - ---- - -## ๐Ÿš€ Quick Start - -### Prerequisites - -- **Debian/Ubuntu** host (apt-based). All other dependencies are installed automatically. -- **2 GB+ RAM, 10 GB+ disk space** -- **Open ports**: 53 (DNS), 80/443 (HTTP/S), 3000 (API), 8081 (Web UI), 51820/udp (WireGuard) - -### 1. Install - -```bash -git clone pic -cd pic - -# Install all system dependencies (docker, python3, python3-cryptography, etc.) -make check-deps - -# Default cell (name=mycell, domain=cell, VPN=10.0.0.1/24, port=51820) -make setup -make start - -# Custom cell โ€” use when installing a second cell on a different host -CELL_NAME=pic1 VPN_ADDRESS=10.1.0.1/24 make setup && make start -``` - -`make check-deps` installs python3, python3-cryptography, docker, docker-compose, curl, openssl, git via apt and adds the current user to the docker group. - -`make setup` generates WireGuard keys, writes configs, and creates all data directories. - -`make start` builds and brings up all 12 Docker containers. - -### 2. Access - -| Service | URL | -|---------|-----| -| Web UI | `http://:8081` | -| API | `http://:3000` | -| Health | `http://:3000/health` | - -On a WireGuard client: `http://mycell.cell` (or whatever your cell name is). - -### 3. Local dev (no Docker) - -```bash -pip install -r api/requirements.txt -python api/app.py # API on :3000 - -cd webui && npm install && npm run dev # React UI on :5173 (proxies API to :3000) -``` - ---- - -## ๐Ÿ› ๏ธ Management Commands - -```bash -# First install -make check-deps # install all system packages via apt -make setup # generate keys, write configs -make start # start all 12 containers - -# Daily operations -make status # container status + API health -make logs # follow all logs -make logs-api # follow logs for one service (api, dns, wg, mail, caddy, ...) -make shell-api # open a shell inside a container - -# Deploy latest code -make update # git pull + rebuild + restart - -# Full wipe and reinstall (useful on test machine) -make reinstall # stop, wipe config/data, setup, start fresh - -# Remove everything -make uninstall # stop + remove images; prompts whether to also wipe config/data - -# Maintenance -make backup # tar config/ + data/ into backups/ -make restore # list available backups -make clean # remove containers/volumes, keep config/data - -# Tests -make test # run all tests -make test-coverage # tests + HTML coverage report -``` - ---- - -## ๐Ÿ”— Connecting Two Cells (PIC Mesh) - -Two PIC instances can form a mesh โ€” full site-to-site WireGuard tunnels with -automatic DNS forwarding so each cell's services are reachable from the other. - -### Install the second cell - -```bash -# On the second host (different VPN subnet; port 51820 is fine โ€” different machine) -CELL_NAME=pic1 VPN_ADDRESS=10.1.0.1/24 make setup && make start -``` - -### Exchange invites (two pastes, two clicks) - -1. On **Cell A** โ†’ open Web UI โ†’ **Cell Network** โ†’ copy the invite JSON. -2. On **Cell B** โ†’ **Cell Network** โ†’ paste into "Connect to Another Cell" โ†’ click **Connect**. -3. On **Cell B** โ†’ copy its invite JSON. -4. On **Cell A** โ†’ paste Cell B's invite โ†’ click **Connect**. - -Both cells now have: -- A site-to-site WireGuard peer (AllowedIPs = remote cell's VPN subnet). -- A CoreDNS forwarding block so `*.pic1.cell` resolves across the tunnel. - -The **Connected Cells** panel shows live handshake status (green = online). - -### Same-LAN tip - -If both cells share the same external IP (behind NAT), the auto-detected -endpoint in the invite will be the public IP. Replace it with the LAN IP -before clicking Connect so traffic stays local: - -```json -{ "endpoint": "192.168.31.50:51820", ... } -``` - ---- - -## ๐Ÿ—๏ธ Architecture - -### **Service Manager Architecture** - -All services inherit from `BaseServiceManager`, providing: -- **Unified Interface**: Consistent methods across all services -- **Health Monitoring**: Standardized health checks and metrics -- **Error Handling**: Centralized error handling and logging -- **Configuration**: Common configuration management patterns - -### **Event-Driven Service Bus** - -```python -# Services communicate via events -service_bus.register_service('network', network_manager) -service_bus.register_service('wireguard', wireguard_manager) -service_bus.publish_event(EventType.SERVICE_STARTED, 'network', data) -``` - -### **Service Dependencies** - -``` -wireguard โ†’ network -email โ†’ network, vault -calendar โ†’ network, vault -files โ†’ network, vault -routing โ†’ network, wireguard -vault โ†’ network -``` - ---- - -## ๐Ÿ”ง Service Managers - -### **Core Network Services** -- **NetworkManager**: DNS, DHCP, NTP with dynamic zone management -- **WireGuardManager**: VPN configuration, peer management, key generation -- **PeerRegistry**: Peer registration, IP updates, trust management - -### **Digital Services** -- **EmailManager**: SMTP/IMAP email with user management -- **CalendarManager**: CalDAV/CardDAV calendar and contacts -- **FileManager**: WebDAV file storage with user directories - -### **Infrastructure Services** -- **RoutingManager**: NAT, firewall, advanced routing (exit/bridge/split) -- **VaultManager**: Certificate authority, trust management, encryption -- **ContainerManager**: Docker orchestration and container management -- **CellManager**: Overall cell configuration and service orchestration - ---- - -## ๐Ÿ“ก API Reference - -### **Core Endpoints** - -```bash -# Service Status -GET /api/services/status -GET /api/services/connectivity - -# Configuration Management -GET /api/config -PUT /api/config -POST /api/config/backup -POST /api/config/restore/ - -# Service Bus -GET /api/services/bus/status -GET /api/services/bus/events -POST /api/services/bus/services//start - -# Logging -GET /api/logs/services/ -POST /api/logs/search -POST /api/logs/export -``` - -### **Service-Specific Endpoints** - -```bash -# Network Services -GET /api/dns/records -POST /api/dns/records -GET /api/dhcp/leases -GET /api/ntp/status - -# WireGuard & Peers -GET /api/wireguard/peers -POST /api/wireguard/peers -GET /api/wireguard/status - -# Digital Services -GET /api/email/users -GET /api/calendar/users -GET /api/files/users - -# Routing & Security -GET /api/routing/status -POST /api/routing/nat -GET /api/vault/certificates -``` - ---- - -## ๐Ÿ’ป CLI Guide - -### **Enhanced CLI Features** - -```bash -# Interactive Mode -python api/enhanced_cli.py --interactive - -# Batch Operations -python api/enhanced_cli.py --batch "status" "services" "health" - -# Configuration Management -python api/enhanced_cli.py --export-config json -python api/enhanced_cli.py --import-config config.json - -# Service Wizards -python api/enhanced_cli.py --wizard network -python api/enhanced_cli.py --wizard email - -# Health Monitoring -python api/enhanced_cli.py --health -python api/enhanced_cli.py --logs network -``` - -### **Service Management** - -```bash -# Show status -python api/enhanced_cli.py --status - -# List services -python api/enhanced_cli.py --services - -# Peer management -python api/enhanced_cli.py --peers - -# Service logs -python api/enhanced_cli.py --logs wireguard -``` - ---- - -## โš™๏ธ Configuration - -### **Configuration Management** - -```bash -# Export configuration -curl -X GET http://localhost:3000/api/config - -# Update configuration -curl -X PUT http://localhost:3000/api/config \ - -H "Content-Type: application/json" \ - -d '{"cell_name": "mycell", "domain": "mycell.cell"}' - -# Backup configuration -curl -X POST http://localhost:3000/api/config/backup -``` - -### **Service Configuration** - -Each service has its own configuration schema: -- **Network**: DNS zones, DHCP ranges, NTP servers -- **WireGuard**: Interface settings, peer configurations -- **Email**: Domain settings, user accounts, mailboxes -- **Calendar**: User accounts, calendar sharing -- **Files**: Storage quotas, user directories -- **Routing**: NAT rules, firewall policies, routing tables - ---- - -## ๐Ÿ”’ Security - -### **Certificate Management** -- **Self-hosted CA**: Issue and manage TLS certificates -- **Certificate Lifecycle**: Generate, renew, revoke certificates -- **Trust Management**: Direct and indirect trust relationships -- **Age Encryption**: Modern encryption for sensitive data - -### **Network Security** -- **WireGuard VPN**: Secure peer-to-peer communication -- **Firewall & NAT**: Granular access control -- **Service Isolation**: Docker containers for each service -- **Input Validation**: All API endpoints validate input - -### **Data Protection** -- **Encrypted Storage**: Sensitive data encrypted at rest -- **Secure Communication**: TLS for all API endpoints -- **Access Control**: Role-based access for services -- **Audit Logging**: Comprehensive security event logging - ---- - -## ๐Ÿ› ๏ธ Development - -### **Project Structure** - -``` -PersonalInternetCell/ -โ”œโ”€โ”€ api/ # Backend API server -โ”‚ โ”œโ”€โ”€ base_service_manager.py # Base class for all services -โ”‚ โ”œโ”€โ”€ config_manager.py # Configuration management -โ”‚ โ”œโ”€โ”€ service_bus.py # Event-driven service bus -โ”‚ โ”œโ”€โ”€ log_manager.py # Comprehensive logging -โ”‚ โ”œโ”€โ”€ enhanced_cli.py # Enhanced CLI tool -โ”‚ โ”œโ”€โ”€ network_manager.py # DNS, DHCP, NTP -โ”‚ โ”œโ”€โ”€ wireguard_manager.py # VPN and peer management -โ”‚ โ”œโ”€โ”€ email_manager.py # Email services -โ”‚ โ”œโ”€โ”€ calendar_manager.py # Calendar services -โ”‚ โ”œโ”€โ”€ file_manager.py # File storage -โ”‚ โ”œโ”€โ”€ routing_manager.py # Routing and NAT -โ”‚ โ”œโ”€โ”€ vault_manager.py # Security and trust -โ”‚ โ”œโ”€โ”€ container_manager.py # Container orchestration -โ”‚ โ”œโ”€โ”€ cell_manager.py # Overall cell management -โ”‚ โ”œโ”€โ”€ peer_registry.py # Peer registration -โ”‚ โ””โ”€โ”€ app.py # Main API server -โ”œโ”€โ”€ webui/ # React frontend -โ”œโ”€โ”€ config/ # Configuration files -โ”œโ”€โ”€ data/ # Persistent data -โ”œโ”€โ”€ tests/ # Test suite -โ””โ”€โ”€ docker-compose.yml # Container orchestration -``` - -### **Running Locally** - -```bash -# Install dependencies -pip install -r api/requirements.txt - -# Start the API server -python api/app.py - -# Run tests -python api/test_enhanced_api.py - -# Start frontend (if available) -cd webui && bun install && npm run dev -``` - -### **Service Development** - -```python -from base_service_manager import BaseServiceManager - -class MyServiceManager(BaseServiceManager): - def __init__(self, data_dir='/app/data', config_dir='/app/config'): - super().__init__('myservice', data_dir, config_dir) - - def get_status(self) -> Dict[str, Any]: - # Implement service status - pass - - def test_connectivity(self) -> Dict[str, Any]: - # Implement connectivity test - pass -``` - ---- - -## ๐Ÿงช Testing - -### **Test Suite** - -```bash -# Run all tests -python api/test_enhanced_api.py - -# Test specific components -python -m pytest api/tests/test_network_manager.py -python -m pytest api/tests/test_service_bus.py - -# Coverage report -coverage run -m pytest api/tests/ -coverage html -``` - -### **Test Coverage** -- **BaseServiceManager**: 100% coverage -- **ConfigManager**: 95%+ coverage -- **ServiceBus**: 95%+ coverage -- **LogManager**: 95%+ coverage -- **All Service Managers**: 77%+ overall coverage - ---- - -## ๐Ÿš€ Deployment - -### **Docker Deployment** - -```bash -# Production deployment -docker-compose -f docker-compose.prod.yml up -d - -# Development deployment -docker-compose up --build -``` - -### **System Requirements** -- **CPU**: 2+ cores -- **RAM**: 2GB+ (4GB recommended) -- **Storage**: 10GB+ (SSD recommended) -- **Network**: Stable internet connection - -### **Monitoring** - -```bash -# Health check -curl http://localhost:3000/health - -# Service status -curl http://localhost:3000/api/services/status - -# Service connectivity -curl http://localhost:3000/api/services/connectivity -``` - ---- - -## ๐Ÿค Contributing - -1. **Fork** the repository -2. **Create** a feature branch -3. **Implement** your changes -4. **Add tests** for new functionality -5. **Submit** a pull request - -### **Development Guidelines** -- Follow the existing code style -- Add comprehensive tests -- Update documentation -- Use the BaseServiceManager pattern -- Implement proper error handling - ---- - -## ๐Ÿ“„ License - -MIT License - see [LICENSE](LICENSE) file for details. - ---- - -## ๐Ÿ“š Documentation - -- **[Quick Start Guide](QUICKSTART.md)**: Get up and running quickly -- **[API Documentation](api/API_DOCUMENTATION.md)**: Complete API reference -- **[Comprehensive Improvements](COMPREHENSIVE_IMPROVEMENTS_SUMMARY.md)**: Detailed architecture overview -- **[Enhanced API Improvements](ENHANCED_API_IMPROVEMENTS.md)**: Technical implementation details - ---- - -**๐ŸŒŸ The Personal Internet Cell - Your self-hosted, production-grade digital infrastructure!** + +# Personal Internet Cell (PIC) + +A self-hosted digital infrastructure platform. One stack, one API, one UI โ€” managing DNS, DHCP, NTP, WireGuard VPN, email, calendar/contacts, file storage, and a reverse proxy on your own hardware. + +--- + +## What it does + +- **Network services** โ€” CoreDNS, dnsmasq DHCP, chrony NTP, all dynamically managed +- **WireGuard VPN** โ€” peer lifecycle, QR-code provisioning, per-peer service access control +- **Digital services** โ€” Email (Postfix/Dovecot), Calendar/Contacts (Radicale CalDAV), Files (WebDAV + Filegator) +- **Reverse proxy** โ€” Caddy with per-service virtual IPs; subdomains like `calendar.mycell.cell` work on VPN clients automatically +- **Certificate authority** โ€” self-hosted CA via VaultManager +- **Cell mesh** โ€” connect two PIC instances with site-to-site WireGuard + DNS forwarding + +Everything is configured through a REST API and a React web UI. No manual config file editing needed for normal operations. + +--- + +## Quick Start + +### Prerequisites + +- Debian/Ubuntu host (apt-based) +- 2 GB+ RAM, 10 GB+ disk +- Open ports: 53 (DNS), 80 (HTTP), 3000 (API), 8081 (Web UI), 51820/udp (WireGuard) + +### Install + +```bash +git clone pic +cd pic + +# Install system deps (docker, python3, python3-cryptography, etc.) +make check-deps + +# Generate keys + write configs +make setup + +# Build and start all 12 containers +make start +``` + +`make setup` accepts overrides for a second cell on a different host: + +```bash +CELL_NAME=pic1 VPN_ADDRESS=10.1.0.1/24 make setup && make start +``` + +### Access + +| Service | URL | +|---------|-----| +| Web UI | `http://:8081` | +| API | `http://:3000` | +| Health | `http://:3000/health` | + +From a WireGuard client: `http://mycell.cell` (replace with your cell name/domain). + +### Local dev (no Docker) + +```bash +pip install -r api/requirements.txt +python api/app.py # Flask API on :3000 + +cd webui && npm install && npm run dev # React UI on :5173 (proxies /api โ†’ :3000) +``` + +--- + +## Management Commands + +```bash +# First install +make check-deps # install system packages via apt +make setup # generate keys, write configs, create data dirs +make start # start all 12 containers + +# Daily operations +make status # container status + API health +make logs # follow all container logs +make logs-api # follow logs for one service (api, dns, wg, mail, caddy, ...) +make shell-api # shell inside a container + +# Deploy latest code +make update # git pull + rebuild api image + restart + +# Maintenance +make backup # tar config/ + data/ into backups/ +make restore # list available backups and restore +make clean # remove containers/volumes, keep config/data + +# Full wipe (test machines) +make reinstall # stop, wipe config/data, setup, start fresh +make uninstall # stop + remove images; prompts to also wipe config/data + +# Tests +make test # run full pytest suite +make test-coverage # tests + HTML coverage report in htmlcov/ +``` + +--- + +## Connecting Two Cells (PIC Mesh) + +Two PIC instances form a mesh: site-to-site WireGuard tunnels with automatic DNS forwarding so each cell's services resolve from the other. + +### Exchange invites + +1. On **Cell A** โ†’ Web UI โ†’ **Cell Network** โ†’ copy the invite JSON. +2. On **Cell B** โ†’ **Cell Network** โ†’ paste into "Connect to Another Cell" โ†’ **Connect**. +3. On **Cell B** โ†’ copy its invite JSON. +4. On **Cell A** โ†’ paste Cell B's invite โ†’ **Connect**. + +Both cells now have a WireGuard peer with `AllowedIPs = remote VPN subnet` and a CoreDNS forwarding block so `*.pic1.cell` resolves across the tunnel. + +### Same-LAN tip + +If both cells share the same external IP (behind NAT), replace the auto-detected endpoint with the LAN IP before connecting: + +```json +{ "endpoint": "192.168.31.50:51820", ... } +``` + +--- + +## Architecture + +### Stack + +``` +cell-caddy (Caddy) :80/:443 + per-service virtual IPs +cell-api (Flask :3000) REST API + config management + container orchestration +cell-webui (Nginx :8081) React UI +cell-dns (CoreDNS :53) internal DNS + per-peer ACLs +cell-dhcp (dnsmasq) DHCP + static reservations +cell-ntp (chrony) NTP +cell-wireguard WireGuard VPN +cell-mail (docker-mailserver) SMTP/IMAP +cell-radicale CalDAV/CardDAV :5232 +cell-webdav WebDAV :80 +cell-filegator file manager UI :8080 +cell-rainloop webmail :8888 +``` + +All containers share a custom Docker bridge network. Static IPs are assigned in `docker-compose.yml`. Caddy adds per-service virtual IPs to its own interface at API startup so `calendar.`, `files.`, etc. route to the right container. + +### Backend (`api/`) + +Service managers (`network_manager.py`, `wireguard_manager.py`, `peer_registry.py`, etc.) all inherit `BaseServiceManager`. `app.py` contains all Flask routes โ€” one file, organized by service. + +`ConfigManager` (`config_manager.py`) is the single source of truth. Config lives in `config/api/cell_config.json`. All managers read/write through it. + +`ip_utils.py` owns all container IP logic via `CONTAINER_OFFSETS` โ€” do not hardcode IPs elsewhere. + +When a config change requires recreating the Docker network (e.g. `ip_range` change), the API spawns a helper container that outlives cell-api to run `docker compose down && up`. Other restarts run `compose up -d --no-deps ` directly. + +### Frontend (`webui/`) + +React 18 + Vite + Tailwind CSS. All API calls go through `src/services/api.js` (Axios). Vite dev server proxies `/api` to `localhost:3000`. Pages in `src/pages/`, shared components in `src/components/`. + +### Project layout + +``` +pic/ +โ”œโ”€โ”€ api/ # Flask API + all service managers +โ”‚ โ”œโ”€โ”€ app.py # all routes (~2700 lines) +โ”‚ โ”œโ”€โ”€ config_manager.py # unified config CRUD +โ”‚ โ”œโ”€โ”€ ip_utils.py # IP/CIDR helpers + Caddyfile generator +โ”‚ โ”œโ”€โ”€ firewall_manager.py # iptables (via cell-wireguard) + Corefile +โ”‚ โ”œโ”€โ”€ network_manager.py # DNS zones, DHCP, NTP +โ”‚ โ”œโ”€โ”€ wireguard_manager.py +โ”‚ โ”œโ”€โ”€ peer_registry.py +โ”‚ โ”œโ”€โ”€ vault_manager.py +โ”‚ โ”œโ”€โ”€ email_manager.py +โ”‚ โ”œโ”€โ”€ calendar_manager.py +โ”‚ โ”œโ”€โ”€ file_manager.py +โ”‚ โ””โ”€โ”€ container_manager.py +โ”œโ”€โ”€ webui/ # React frontend +โ”œโ”€โ”€ config/ # Config files (bind-mounted into containers) +โ”‚ โ”œโ”€โ”€ api/cell_config.json โ† live config +โ”‚ โ”œโ”€โ”€ caddy/Caddyfile +โ”‚ โ”œโ”€โ”€ dns/Corefile +โ”‚ โ””โ”€โ”€ ... +โ”œโ”€โ”€ data/ # Persistent data (git-ignored) +โ”œโ”€โ”€ tests/ # pytest suite (372 tests, 27 files) +โ”œโ”€โ”€ docker-compose.yml +โ””โ”€โ”€ Makefile +``` + +--- + +## API Reference + +### Config + +``` +GET /api/config full config + service IPs +PUT /api/config update identity or service config +GET /api/config/pending pending restart info +POST /api/config/apply apply pending restart +POST /api/config/backup create backup +POST /api/config/restore/ restore from backup +``` + +### Network + +``` +GET /api/dns/records +POST /api/dns/records +GET /api/dhcp/leases +GET /api/dhcp/reservations +POST /api/dhcp/reservations +``` + +### WireGuard & Peers + +``` +GET /api/wireguard/status +GET /api/wireguard/peers +POST /api/wireguard/peers +GET /api/peers +POST /api/peers +PUT /api/peers/ +DELETE /api/peers/ +GET /api/peers//config peer config + QR code +``` + +### Containers & Health + +``` +GET /api/containers +POST /api/containers//restart +GET /health +GET /api/services/status +``` + +--- + +## Testing + +```bash +make test # run full suite +make test-coverage # coverage report in htmlcov/ +pytest tests/test_.py # single file +pytest tests/ -k "test_name" # single test +``` + +Tests live in `tests/` and use `unittest.TestCase` collected by pytest. External system calls (Docker, iptables, file writes) are mocked with `unittest.mock.patch`. + +Known coverage gaps: `write_caddyfile`, `POST /api/config/apply` (helper container path), `PUT /api/config` 400 validation paths. These are the highest-risk untested paths. + +--- + +## Security Notes + +- The API is access-controlled by `is_local_request()` โ€” it checks whether the request comes from a local/loopback/cell-network IP. Sensitive endpoints (containers, vault) are restricted to local access only. +- All per-peer service access is enforced via iptables rules inside `cell-wireguard` and CoreDNS ACL blocks. +- The Docker socket is mounted into `cell-api` for container management โ€” treat network access to port 3000 as privileged. +- `ip_range` must be an RFC-1918 CIDR (10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16). The API and UI both validate this. + +--- + +## License + +MIT โ€” see [LICENSE](LICENSE). diff --git a/api/app.py b/api/app.py index 18a0520..4d51e04 100644 --- a/api/app.py +++ b/api/app.py @@ -179,7 +179,6 @@ email_manager = EmailManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) calendar_manager = CalendarManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) file_manager = FileManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) routing_manager = RoutingManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) -cell_manager = CellManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) app.vault_manager = VaultManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) container_manager = ContainerManager(data_dir=_DATA_DIR, config_dir=_CONFIG_DIR) cell_link_manager = CellLinkManager( @@ -320,31 +319,37 @@ health_monitor_thread = threading.Thread(target=health_monitor_loop, daemon=True health_monitor_thread.start() def is_local_request(): - # Allow requests from localhost, Docker networks, and internal IPs remote_addr = request.remote_addr forwarded_for = request.headers.get('X-Forwarded-For', '') - - # Check direct remote address - if remote_addr in ('127.0.0.1', '::1', 'localhost'): - return True - - # Check forwarded address (for reverse proxy scenarios) - if forwarded_for: - forwarded_ips = [ip.strip() for ip in forwarded_for.split(',')] - for ip in forwarded_ips: - if ip in ('127.0.0.1', '::1', 'localhost'): - return True - - # Allow Docker internal networks (172.x.x.x, 192.168.x.x, 10.x.x.x) - if remote_addr: + + def _allowed(addr): + if not addr: + return False + if addr in ('127.0.0.1', '::1', 'localhost'): + return True try: - import ipaddress - ip = ipaddress.ip_address(remote_addr) + import ipaddress as _ipa + ip = _ipa.ip_address(addr) if ip.is_private or ip.is_loopback: return True - except: + # Also allow IPs in the configured cell-network, which may fall outside + # RFC-1918 (e.g. 172.0.0.0/24 is not in 172.16.0.0/12). + cell_net = config_manager.configs.get('_identity', {}).get( + 'ip_range', os.environ.get('CELL_IP_RANGE', '172.20.0.0/16')) + if ip in _ipa.ip_network(cell_net, strict=False): + return True + except Exception: pass - + return False + + if _allowed(remote_addr): + return True + # Only trust the LAST X-Forwarded-For entry โ€” that is what Caddy appended. + # Iterating all entries allows clients to spoof local origin by prepending 127.0.0.1. + if forwarded_for: + last_hop = forwarded_for.split(',')[-1].strip() + if _allowed(last_hop): + return True return False @app.route('/health', methods=['GET']) @@ -431,6 +436,59 @@ def update_config(): # Handle identity fields (cell_name, domain, ip_range, wireguard_port) identity_keys = {'cell_name', 'domain', 'ip_range', 'wireguard_port'} identity_updates = {k: v for k, v in data.items() if k in identity_keys} + + # Validate ip_range โ€” must be a valid CIDR within an RFC-1918 range + if 'ip_range' in identity_updates: + import ipaddress as _ipa + _rfc1918 = [ + _ipa.ip_network('10.0.0.0/8'), + _ipa.ip_network('172.16.0.0/12'), + _ipa.ip_network('192.168.0.0/16'), + ] + try: + _net = _ipa.ip_network(identity_updates['ip_range'], strict=False) + if not any(_net.subnet_of(r) for r in _rfc1918): + return jsonify({'error': ( + 'ip_range must be within an RFC-1918 private range ' + '(10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16)' + )}), 400 + except ValueError as _e: + return jsonify({'error': f'Invalid ip_range: {_e}'}), 400 + + # Validate service config port and IP fields + _port_fields = { + 'network': ['dns_port'], + 'wireguard': ['port'], + 'email': ['smtp_port', 'submission_port', 'imap_port', 'webmail_port'], + 'calendar': ['port'], + 'files': ['port', 'manager_port'], + } + for _svc, _fields in _port_fields.items(): + if _svc not in data: + continue + _svc_data = data[_svc] + if not isinstance(_svc_data, dict): + continue + for _f in _fields: + if _f in _svc_data and _svc_data[_f] is not None and _svc_data[_f] != '': + try: + _p = int(_svc_data[_f]) + if not (1 <= _p <= 65535): + raise ValueError() + except (ValueError, TypeError): + return jsonify({'error': f'{_svc}.{_f} must be an integer between 1 and 65535'}), 400 + # Validate WireGuard address (must be valid IP/CIDR) + if 'wireguard' in data and isinstance(data['wireguard'], dict): + _addr = data['wireguard'].get('address') + if _addr: + import ipaddress as _ipa2 + if '/' not in str(_addr): + return jsonify({'error': 'wireguard.address must include a prefix length (e.g. 10.0.0.1/24)'}), 400 + try: + _ipa2.ip_interface(_addr) + except ValueError as _e: + return jsonify({'error': f'wireguard.address is not a valid IP/CIDR: {_e}'}), 400 + # Capture old identity and service configs BEFORE saving, for change detection old_identity = dict(config_manager.configs.get('_identity', {})) old_svc_configs = { @@ -692,13 +750,19 @@ def apply_pending_config(): if not pending.get('needs_restart'): return jsonify({'message': 'No pending changes to apply'}) - # Get project working dir from our own container labels (set by docker-compose) + # Get project working dir and image name from our own container labels project_dir = '/home/roof/pic' + api_image = 'pic_api:latest' # fallback (docker-compose v1 naming) try: import docker as _docker_sdk _client = _docker_sdk.from_env() _self = _client.containers.get('cell-api') project_dir = _self.labels.get('com.docker.compose.project.working_dir', project_dir) + # Use the actual image tag so the helper works regardless of compose version + # (docker-compose v1 builds pic_api:latest, compose v2+ builds pic-api:latest) + tags = _self.image.tags + if tags: + api_image = tags[0] except Exception: pass @@ -717,8 +781,8 @@ def apply_pending_config(): if '*' in containers: # All-services restart: `docker compose down` or `up -d` may stop/recreate the # API container itself, killing this background thread mid-operation. - # Spawn an independent helper container using pic_api:latest that has docker CLI - # and survives cell-api being stopped/recreated. + # Spawn an independent helper container (same image as cell-api) that has docker + # CLI and survives cell-api being stopped/recreated. if needs_network_recreate: helper_script = ( f'sleep 2' @@ -741,7 +805,7 @@ def apply_pending_config(): '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', f'{project_dir}:{project_dir}', '--entrypoint', 'sh', - 'pic_api:latest', + api_image, '-c', helper_script], close_fds=True, stdout=_subprocess.DEVNULL, @@ -1105,10 +1169,13 @@ def get_dhcp_leases(): def add_dhcp_reservation(): try: data = request.get_json(silent=True) - if data is None: + if not data: return jsonify({"error": "No data provided"}), 400 - result = network_manager.add_dhcp_reservation(data) - return jsonify(result) + for field in ('mac', 'ip'): + if field not in data: + return jsonify({"error": f"Missing required field: {field}"}), 400 + result = network_manager.add_dhcp_reservation(data['mac'], data['ip'], data.get('hostname', '')) + return jsonify({"success": result}) except Exception as e: logger.error(f"Error adding DHCP reservation: {e}") return jsonify({"error": str(e)}), 500 @@ -1118,8 +1185,10 @@ def remove_dhcp_reservation(): """Remove DHCP reservation.""" try: data = request.get_json(silent=True) - result = network_manager.remove_dhcp_reservation(data) - return jsonify(result) + if not data or 'mac' not in data: + return jsonify({"error": "Missing required field: mac"}), 400 + result = network_manager.remove_dhcp_reservation(data['mac']) + return jsonify({"success": result}) except Exception as e: logger.error(f"Error removing DHCP reservation: {e}") return jsonify({"error": str(e)}), 500 @@ -1157,10 +1226,7 @@ def get_dns_status(): @app.route('/api/network/test', methods=['POST']) def test_network(): try: - data = request.get_json(silent=True) - if data is None: - return jsonify({"error": "No data provided"}), 400 - result = network_manager.test_connectivity(data) + result = network_manager.test_connectivity() return jsonify(result) except Exception as e: logger.error(f"Error testing network: {e}") @@ -1511,6 +1577,12 @@ def add_peer(): assigned_ip = data.get('ip') or _next_peer_ip() + # Validate service_access if provided + _valid_services = {'calendar', 'files', 'mail', 'webdav'} + service_access = data.get('service_access', list(_valid_services)) + if not isinstance(service_access, list) or not all(s in _valid_services for s in service_access): + return jsonify({"error": f"service_access must be a list of: {sorted(_valid_services)}"}), 400 + # Add peer to registry with all provided fields peer_info = { 'peer': data['name'], @@ -1523,7 +1595,7 @@ def add_peer(): 'persistent_keepalive': data.get('persistent_keepalive'), 'description': data.get('description'), 'internet_access': data.get('internet_access', True), - 'service_access': data.get('service_access', ['calendar', 'files', 'mail', 'webdav']), + 'service_access': service_access, 'peer_access': data.get('peer_access', True), 'config_needs_reinstall': False, } @@ -1590,10 +1662,17 @@ def clear_peer_reinstall(peer_name): @app.route('/api/peers/', methods=['DELETE']) def remove_peer(peer_name): - """Remove a peer.""" + """Remove a peer and clean up its firewall rules and DNS ACLs.""" try: + peer = peer_registry.get_peer(peer_name) + if not peer: + return jsonify({"message": f"Peer {peer_name} not found or already removed"}) + peer_ip = peer.get('ip') success = peer_registry.remove_peer(peer_name) if success: + if peer_ip: + firewall_manager.clear_peer_rules(peer_ip) + firewall_manager.apply_all_dns_rules(peer_registry.list_peers(), COREFILE_PATH, _configured_domain()) return jsonify({"message": f"Peer {peer_name} removed successfully"}) else: return jsonify({"message": f"Peer {peer_name} not found or already removed"}) @@ -2497,8 +2576,8 @@ def restart_container(name): @app.route('/api/containers//logs', methods=['GET']) def get_container_logs(name): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 tail = request.args.get('tail', default=100, type=int) try: logs = container_manager.get_container_logs(name, tail=tail) @@ -2510,8 +2589,8 @@ def get_container_logs(name): @app.route('/api/containers//stats', methods=['GET']) def get_container_stats(name): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 try: stats = container_manager.get_container_stats(name) return jsonify(stats) @@ -2522,16 +2601,16 @@ def get_container_stats(name): @app.route('/api/vault/secrets', methods=['GET']) def list_secrets(): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 secrets = app.vault_manager.list_secrets() return jsonify({'secrets': secrets}) @app.route('/api/vault/secrets', methods=['POST']) def store_secret(): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 data = request.get_json(silent=True) if not data or 'name' not in data or 'value' not in data: return jsonify({'error': 'Missing name or value'}), 400 @@ -2541,8 +2620,8 @@ def store_secret(): @app.route('/api/vault/secrets/', methods=['GET']) def get_secret(name): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 value = app.vault_manager.get_secret(name) if value is None: return jsonify({'error': 'Not found'}), 404 @@ -2551,8 +2630,8 @@ def get_secret(name): @app.route('/api/vault/secrets/', methods=['DELETE']) def delete_secret(name): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 result = app.vault_manager.delete_secret(name) return jsonify({'deleted': result}) @@ -2560,8 +2639,8 @@ def delete_secret(name): @app.route('/api/containers', methods=['POST']) def create_container(): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 data = request.get_json(silent=True) if not data or 'image' not in data: return jsonify({'error': 'Missing image parameter'}), 400 @@ -2592,8 +2671,8 @@ def create_container(): @app.route('/api/containers/', methods=['DELETE']) def remove_container(name): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 force = request.args.get('force', default=False, type=bool) success = container_manager.remove_container(name, force=force) return jsonify({'removed': success}) @@ -2601,8 +2680,8 @@ def remove_container(name): @app.route('/api/images', methods=['GET']) def list_images(): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 images = container_manager.list_images() return jsonify(images) @@ -2629,8 +2708,8 @@ def remove_image(image): @app.route('/api/volumes', methods=['GET']) def list_volumes(): # Temporarily disable access control for debugging - # if not is_local_request(): - # return jsonify({'error': 'Access denied'}), 403 + if not is_local_request(): + return jsonify({'error': 'Access denied'}), 403 volumes = container_manager.list_volumes() return jsonify(volumes) diff --git a/api/config_manager.py b/api/config_manager.py index eb8f0fd..fd9cca5 100644 --- a/api/config_manager.py +++ b/api/config_manager.py @@ -117,11 +117,15 @@ class ConfigManager: return {} def _save_all_configs(self): - """Save all service configurations to the unified config file""" + """Save all service configurations to the unified config file (atomic write).""" try: self.config_file.parent.mkdir(parents=True, exist_ok=True) - with open(self.config_file, 'w') as f: + tmp = self.config_file.with_suffix('.tmp') + with open(tmp, 'w') as f: json.dump(self.configs, f, indent=2) + f.flush() + os.fsync(f.fileno()) + os.replace(tmp, self.config_file) except (PermissionError, OSError): pass @@ -208,62 +212,98 @@ class ConfigManager: } def backup_config(self) -> str: - """Create a backup of all configurations""" + """Create a backup of cell_config.json, secrets, Caddyfile, .env, Corefile, and DNS zones.""" try: timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') backup_id = f"backup_{timestamp}" backup_path = self.backup_dir / backup_id - - # Create backup directory backup_path.mkdir(parents=True, exist_ok=True) - - # Copy all config files + + # Primary config and secrets if self.config_file.exists(): shutil.copy2(self.config_file, backup_path / 'cell_config.json') - - # Copy secrets file if it exists if self.secrets_file.exists(): shutil.copy2(self.secrets_file, backup_path / 'secrets.yaml') - - # Create backup manifest + + # Runtime-generated files that must match cell_config.json after restore + config_dir = Path(os.environ.get('CONFIG_DIR', '/app/config')) + data_dir = Path(os.environ.get('DATA_DIR', '/app/data')) + env_file = Path(os.environ.get('ENV_FILE', '/app/.env')) + + extra = [ + (config_dir / 'caddy' / 'Caddyfile', 'Caddyfile'), + (config_dir / 'dns' / 'Corefile', 'Corefile'), + (env_file, '.env'), + ] + for src, dest_name in extra: + if src.exists(): + shutil.copy2(src, backup_path / dest_name) + + # DNS zone files + dns_data = data_dir / 'dns' + if dns_data.is_dir(): + zones_dir = backup_path / 'dns_zones' + zones_dir.mkdir(exist_ok=True) + for zone_file in dns_data.glob('*.zone'): + shutil.copy2(zone_file, zones_dir / zone_file.name) + manifest = { "backup_id": backup_id, "timestamp": datetime.now().isoformat(), "services": list(self.service_schemas.keys()), - "files": [f.name for f in backup_path.iterdir()] + "files": [f.name for f in backup_path.iterdir()], } - with open(backup_path / 'manifest.json', 'w') as f: json.dump(manifest, f, indent=2) - + logger.info(f"Created configuration backup: {backup_id}") return backup_id - + except Exception as e: logger.error(f"Error creating backup: {e}") raise def restore_config(self, backup_id: str) -> bool: - """Restore configuration from backup""" + """Restore cell_config.json, secrets, Caddyfile, .env, Corefile, and DNS zones from backup.""" try: backup_path = self.backup_dir / backup_id if not backup_path.exists(): raise ValueError(f"Backup {backup_id} not found") - # Read manifest manifest_file = backup_path / 'manifest.json' if not manifest_file.exists(): raise ValueError(f"Backup manifest not found") - with open(manifest_file, 'r') as f: - manifest = json.load(f) - # Restore config files + + # Restore primary config config_backup = backup_path / 'cell_config.json' if config_backup.exists(): shutil.copy2(config_backup, self.config_file) - # Restore secrets file if it exists secrets_backup = backup_path / 'secrets.yaml' if secrets_backup.exists(): shutil.copy2(secrets_backup, self.secrets_file) - # Reload configurations โ€” restore only what was in the backup + + # Restore runtime-generated files so they stay consistent with cell_config.json + config_dir = Path(os.environ.get('CONFIG_DIR', '/app/config')) + data_dir = Path(os.environ.get('DATA_DIR', '/app/data')) + env_file = Path(os.environ.get('ENV_FILE', '/app/.env')) + + restore_map = [ + (backup_path / 'Caddyfile', config_dir / 'caddy' / 'Caddyfile'), + (backup_path / 'Corefile', config_dir / 'dns' / 'Corefile'), + (backup_path / '.env', env_file), + ] + for src, dest in restore_map: + if src.exists(): + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(src, dest) + + # Restore DNS zone files + zones_backup = backup_path / 'dns_zones' + if zones_backup.is_dir(): + dns_data = data_dir / 'dns' + dns_data.mkdir(parents=True, exist_ok=True) + for zone_file in zones_backup.glob('*.zone'): + shutil.copy2(zone_file, dns_data / zone_file.name) + self.configs = self._load_all_configs() logger.info(f"Restored configuration from backup: {backup_id}") return True diff --git a/api/firewall_manager.py b/api/firewall_manager.py index 51d65e1..01572c5 100644 --- a/api/firewall_manager.py +++ b/api/firewall_manager.py @@ -276,14 +276,16 @@ def generate_corefile(peers: List[Dict[str, Any]], corefile_path: str = COREFILE }} {primary_zone_block} -local.{domain} {{ - file /data/local.zone - log -}} """ + # local.{domain} block intentionally omitted: /data/local.zone does not exist + # and CoreDNS logs errors on every reload for a missing zone file. os.makedirs(os.path.dirname(corefile_path), exist_ok=True) - with open(corefile_path, 'w') as f: + tmp_path = corefile_path + '.tmp' + with open(tmp_path, 'w') as f: f.write(corefile) + f.flush() + os.fsync(f.fileno()) + os.replace(tmp_path, corefile_path) logger.info(f"Wrote Corefile to {corefile_path}") return True @@ -293,13 +295,13 @@ local.{domain} {{ def reload_coredns() -> bool: - """Send SIGHUP to CoreDNS container to reload config.""" + """Signal CoreDNS to reload its config. SIGUSR1 triggers the reload plugin; SIGHUP kills the process.""" try: - result = _run(['docker', 'kill', '--signal=SIGHUP', 'cell-dns'], check=False) + result = _run(['docker', 'kill', '--signal=SIGUSR1', 'cell-dns'], check=False) if result.returncode == 0: - logger.info("Sent SIGHUP to cell-dns") + logger.info("Sent SIGUSR1 to cell-dns (reload)") return True - logger.warning(f"SIGHUP to cell-dns failed: {result.stderr.strip()}") + logger.warning(f"SIGUSR1 to cell-dns failed: {result.stderr.strip()}") return False except Exception as e: logger.error(f"reload_coredns: {e}") diff --git a/api/ip_utils.py b/api/ip_utils.py index 007d17e..0837cb2 100644 --- a/api/ip_utils.py +++ b/api/ip_utils.py @@ -200,8 +200,12 @@ http://api.{domain} {{ }} """ os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True) - with open(path, 'w') as f: + tmp = path + '.tmp' + with open(tmp, 'w') as f: f.write(content) + f.flush() + os.fsync(f.fileno()) + os.replace(tmp, path) return True except Exception: return False @@ -229,8 +233,12 @@ def write_env_file(ip_range: str, path: str, ports: Optional[Dict[str, int]] = N for key, var in PORT_ENV_VAR_NAMES.items(): lines.append(f'{var}={merged_ports[key]}\n') os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True) - with open(path, 'w') as f: + tmp = path + '.tmp' + with open(tmp, 'w') as f: f.writelines(lines) + f.flush() + os.fsync(f.fileno()) + os.replace(tmp, path) return True except Exception: return False diff --git a/api/network_manager.py b/api/network_manager.py index 6721ec6..a28bc69 100644 --- a/api/network_manager.py +++ b/api/network_manager.py @@ -33,10 +33,14 @@ class NetworkManager(BaseServiceManager): # Create zone file content content = self._generate_zone_content(zone_name, records) - - with open(zone_file, 'w') as f: + + tmp_file = zone_file + '.tmp' + with open(tmp_file, 'w') as f: f.write(content) - + f.flush() + os.fsync(f.fileno()) + os.replace(tmp_file, zone_file) + # Reload DNS service self._reload_dns_service() diff --git a/api/routing_manager.py b/api/routing_manager.py index 63a3d7f..024c151 100644 --- a/api/routing_manager.py +++ b/api/routing_manager.py @@ -2,6 +2,16 @@ """ Routing Manager for Personal Internet Cell Handles VPN gateway, NAT, iptables, and advanced routing + +NOTE: This manager runs iptables/ip-route commands on the HOST (the machine running +docker-compose), not inside cell-wireguard. This is intentional for host-level +routing features (exit-node, bridge, split-route) that are not yet wired to any +UI endpoint. The manager is instantiated but its methods are not called by any +active API route. + +CRITICAL: _remove_nat_rule flushes ALL of POSTROUTING (-F), which would wipe the +WireGuard MASQUERADE rule. Do not call it until this is fixed to use targeted +rule deletion (-D) instead of a full flush. """ import os @@ -766,14 +776,18 @@ class RoutingManager(BaseServiceManager): logger.error(f"Failed to apply NAT rule: {e}") def _remove_nat_rule(self, rule_id: str): - """Remove NAT rule from iptables""" + """Remove NAT rule from iptables by rule_id comment tag.""" try: - # This is a simplified removal - in practice you'd need to track the exact rule - cmd = ['iptables', '-t', 'nat', '-F', 'POSTROUTING'] - subprocess.run(cmd, check=True, timeout=10) - - logger.info(f"Removed NAT rule: {rule_id}") - + # Use -D with the comment tag to remove the specific rule rather than + # flushing the entire POSTROUTING chain (which would wipe WireGuard MASQUERADE). + cmd = ['iptables', '-t', 'nat', '-D', 'POSTROUTING', + '-m', 'comment', '--comment', rule_id, '-j', 'MASQUERADE'] + result = subprocess.run(cmd, timeout=10) + if result.returncode != 0: + # Rule may not exist โ€” not an error + logger.debug(f"NAT rule {rule_id} not found (already removed?)") + else: + logger.info(f"Removed NAT rule: {rule_id}") except Exception as e: logger.error(f"Failed to remove NAT rule: {e}") diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..54d7b56 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,45 @@ +""" +Shared pytest fixtures for the PIC test suite. +""" +import os +import sys +import json +import tempfile +import shutil +import pytest + +# Ensure api/ is on the path for all tests +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api')) + + +@pytest.fixture +def tmp_dir(): + """Temporary directory that is cleaned up after each test.""" + d = tempfile.mkdtemp() + yield d + shutil.rmtree(d, ignore_errors=True) + + +@pytest.fixture +def tmp_config_dir(tmp_dir): + """Temporary config dir with the sub-directories expected by managers.""" + for sub in ('api', 'caddy', 'dns', 'dhcp', 'ntp', 'wireguard'): + os.makedirs(os.path.join(tmp_dir, sub), exist_ok=True) + return tmp_dir + + +@pytest.fixture +def tmp_data_dir(tmp_dir): + """Temporary data dir with the sub-directories expected by managers.""" + for sub in ('dns', 'mail', 'calendar', 'files', 'wireguard'): + os.makedirs(os.path.join(tmp_dir, sub), exist_ok=True) + return tmp_dir + + +@pytest.fixture +def flask_client(): + """Flask test client with TESTING mode enabled.""" + from app import app + app.config['TESTING'] = True + with app.test_client() as client: + yield client diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..cc33d3c --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,97 @@ +""" +Shared fixtures for live integration tests. + +Configure with environment variables: + PIC_HOST API host (default: localhost) + PIC_API_PORT API port (default: 3000) + PIC_WEBUI_PORT WebUI port (default: 80) + PIC_WG_CONTAINER WireGuard container name (default: cell-wireguard) +""" +import os +import json +import subprocess +import pytest +import requests + +PIC_HOST = os.environ.get('PIC_HOST', 'localhost') +API_PORT = int(os.environ.get('PIC_API_PORT', '3000')) +WEBUI_PORT = int(os.environ.get('PIC_WEBUI_PORT', '80')) +WG_CONTAINER = os.environ.get('PIC_WG_CONTAINER', 'cell-wireguard') + +API_BASE = f"http://{PIC_HOST}:{API_PORT}" +WEBUI_BASE = f"http://{PIC_HOST}:{WEBUI_PORT}" + +TEST_PEERS = ( + 'integration-test-full', + 'integration-test-restricted', + 'integration-test-none', + 'bad-svc-peer', # guard against validation-test leak +) + + +@pytest.fixture(scope='session') +def api(): + s = requests.Session() + s.headers['Content-Type'] = 'application/json' + return s + + +@pytest.fixture(scope='session') +def api_base(): + return API_BASE + + +@pytest.fixture(scope='session') +def webui_base(): + return WEBUI_BASE + + +@pytest.fixture(scope='session', autouse=True) +def cleanup_test_peers(api): + """Delete any leftover test peers before and after the entire session.""" + for name in TEST_PEERS: + api.delete(f"{API_BASE}/api/peers/{name}") + yield + for name in TEST_PEERS: + api.delete(f"{API_BASE}/api/peers/{name}") + + +def iptables_forward() -> str: + """Return iptables-save output from the WireGuard container.""" + result = subprocess.run( + ['docker', 'exec', WG_CONTAINER, 'iptables-save'], + capture_output=True, text=True, timeout=10, + ) + return result.stdout + + +def peer_rules(peer_ip: str) -> list[str]: + """Return FORWARD rule lines for a specific peer IP.""" + comment = f'pic-peer-{peer_ip.replace(".", "-")}' + return [line for line in iptables_forward().splitlines() if comment in line] + + +def get_live_service_vips() -> dict: + """ + Read SERVICE_IPS directly from the running API container. + More reliable than the config API since SERVICE_IPS may not match ip_range + when the container was built before an ip_range change. + """ + import json + result = subprocess.run( + ['docker', 'exec', 'cell-api', 'python3', '-c', + 'import sys; sys.path.insert(0,"/app/api");' + ' from firewall_manager import SERVICE_IPS; import json; print(json.dumps(SERVICE_IPS))'], + capture_output=True, text=True, timeout=10, + ) + if result.returncode == 0 and result.stdout.strip(): + return json.loads(result.stdout) + # Fallback: derive from config API + cfg = requests.get(f"{API_BASE}/api/config").json() + sips = cfg.get('service_ips', {}) + return { + 'calendar': sips.get('vip_calendar', ''), + 'files': sips.get('vip_files', ''), + 'mail': sips.get('vip_mail', ''), + 'webdav': sips.get('vip_webdav', ''), + } diff --git a/tests/integration/test_live_api.py b/tests/integration/test_live_api.py new file mode 100644 index 0000000..f84858b --- /dev/null +++ b/tests/integration/test_live_api.py @@ -0,0 +1,245 @@ +""" +Read-only integration tests: health, config, containers, WireGuard, network services. + +Run with: pytest tests/integration/test_live_api.py -v +Or: PIC_HOST=192.168.31.51 pytest tests/integration/test_live_api.py -v +""" +import pytest +import sys, os +sys.path.insert(0, os.path.dirname(__file__)) +from conftest import API_BASE + +# Shorthand helpers โ€” always hits the live API +import requests as _req + +def get(path, **kw): + return _req.get(f"{API_BASE}{path}", **kw) + +def post(path, **kw): + return _req.post(f"{API_BASE}{path}", **kw) + + +# --------------------------------------------------------------------------- +# Health & status +# --------------------------------------------------------------------------- + +class TestHealth: + def test_health_returns_200(self): + r = get('/health') + assert r.status_code == 200 + + def test_health_body(self): + r = get('/health') + data = r.json() + assert data.get('status') == 'healthy' + assert 'timestamp' in data + + def test_api_status_returns_200(self): + r = get('/api/status') + assert r.status_code == 200 + + def test_api_status_body(self): + r = get('/api/status') + data = r.json() + assert 'cell_name' in data or 'status' in data + + +# --------------------------------------------------------------------------- +# Config +# --------------------------------------------------------------------------- + +class TestConfig: + def test_get_config(self): + r = get('/api/config') + assert r.status_code == 200 + + def test_config_has_required_fields(self): + data = get('/api/config').json() + for field in ('cell_name', 'domain', 'ip_range'): + assert field in data, f"config missing field: {field}" + + def test_config_ip_range_is_cidr(self): + import ipaddress + ip_range = get('/api/config').json()['ip_range'] + ipaddress.ip_network(ip_range, strict=False) # raises if invalid + + def test_pending_endpoint_reachable(self): + r = get('/api/config/pending') + assert r.status_code == 200 + + def test_backups_endpoint_reachable(self): + r = get('/api/config/backups') + assert r.status_code == 200 + assert isinstance(r.json(), list) + + +# --------------------------------------------------------------------------- +# Containers +# --------------------------------------------------------------------------- + +EXPECTED_CONTAINERS = [ + 'cell-caddy', 'cell-dns', 'cell-dhcp', 'cell-ntp', + 'cell-mail', 'cell-radicale', 'cell-webdav', 'cell-wireguard', + 'cell-api', 'cell-webui', 'cell-rainloop', 'cell-filegator', +] + +class TestContainers: + def test_containers_endpoint_reachable(self): + r = get('/api/containers') + assert r.status_code == 200 + + def test_containers_returns_list(self): + data = get('/api/containers').json() + assert isinstance(data, list) + assert len(data) > 0 + + def test_all_expected_containers_present(self): + data = get('/api/containers').json() + running = {c['name'] for c in data} + missing = set(EXPECTED_CONTAINERS) - running + assert not missing, f"Containers not found: {missing}" + + def test_all_expected_containers_running(self): + data = get('/api/containers').json() + by_name = {c['name']: c for c in data} + not_running = [ + name for name in EXPECTED_CONTAINERS + if by_name.get(name, {}).get('status') != 'running' + ] + assert not not_running, f"Containers not running: {not_running}" + + +# --------------------------------------------------------------------------- +# WireGuard +# --------------------------------------------------------------------------- + +class TestWireGuard: + def test_wireguard_status_up(self): + r = get('/api/wireguard/status') + assert r.status_code == 200 + data = r.json() + assert data.get('running') is True, f"WireGuard not running: {data}" + + def test_wireguard_interface_name(self): + data = get('/api/wireguard/status').json() + assert data.get('interface') == 'wg0' + + def test_wireguard_keys_endpoint(self): + r = get('/api/wireguard/keys') + assert r.status_code == 200 + data = r.json() + assert 'public_key' in data + + def test_wireguard_wg_peers_endpoint(self): + r = get('/api/wireguard/peers') + assert r.status_code == 200 + assert isinstance(r.json(), list) + + def test_wireguard_config_endpoint(self): + r = get('/api/wireguard/config') + assert r.status_code == 200 + + +# --------------------------------------------------------------------------- +# Network services: DNS, DHCP, NTP +# --------------------------------------------------------------------------- + +class TestNetworkServices: + def test_dns_records_endpoint(self): + r = get('/api/dns/records') + assert r.status_code == 200 + + def test_dns_status_endpoint(self): + r = get('/api/dns/status') + assert r.status_code == 200 + + def test_dhcp_leases_endpoint(self): + r = get('/api/dhcp/leases') + assert r.status_code == 200 + + def test_ntp_status_endpoint(self): + r = get('/api/ntp/status') + assert r.status_code == 200 + + def test_network_info_endpoint(self): + r = get('/api/network/info') + assert r.status_code == 200 + + +# --------------------------------------------------------------------------- +# Services bus / all-service status +# --------------------------------------------------------------------------- + +class TestServicesStatus: + def test_all_services_status_reachable(self): + r = get('/api/services/status') + assert r.status_code == 200 + + def test_services_status_has_expected_keys(self): + data = get('/api/services/status').json() + for svc in ('network', 'wireguard', 'email', 'calendar', 'files'): + assert svc in data, f"Missing service in status: {svc}" + + def test_services_connectivity_reachable(self): + r = get('/api/services/connectivity') + assert r.status_code == 200 + + def test_health_history_reachable(self): + r = get('/api/health/history') + assert r.status_code == 200 + assert isinstance(r.json(), list) + + +# --------------------------------------------------------------------------- +# Peers read-only +# --------------------------------------------------------------------------- + +class TestPeersReadOnly: + def test_peers_list_endpoint(self): + r = get('/api/peers') + assert r.status_code == 200 + assert isinstance(r.json(), list) + + def test_peers_have_required_fields(self): + peers = get('/api/peers').json() + for peer in peers: + for field in ('peer', 'ip', 'public_key', 'service_access'): + assert field in peer, f"Peer missing field '{field}': {peer}" + + def test_peer_service_access_values_are_valid(self): + valid = {'calendar', 'files', 'mail', 'webdav'} + peers = get('/api/peers').json() + for peer in peers: + for svc in peer.get('service_access', []): + assert svc in valid, f"Unknown service '{svc}' in peer {peer['peer']}" + + def test_wg_peer_statuses_endpoint(self): + r = get('/api/wireguard/peers/statuses') + assert r.status_code == 200 + + +# --------------------------------------------------------------------------- +# Input validation (no state changes) +# --------------------------------------------------------------------------- + +class TestValidation: + def test_add_peer_missing_name_returns_400(self): + r = post('/api/peers', json={'public_key': 'dummykey=='}) + assert r.status_code == 400 + + def test_add_peer_missing_key_returns_400(self): + r = post('/api/peers', json={'name': 'no-key-peer'}) + assert r.status_code == 400 + + def test_add_peer_invalid_service_access_returns_400(self): + r = post('/api/peers', json={ + 'name': 'bad-svc-peer', + 'public_key': 'dummykey==', + 'service_access': ['invalid_service'], + }) + assert r.status_code == 400 + assert 'service_access' in r.json().get('error', '') + + def test_generate_keys_missing_name_returns_400(self): + r = post('/api/wireguard/keys/peer', json={}) + assert r.status_code == 400 diff --git a/tests/integration/test_peer_lifecycle.py b/tests/integration/test_peer_lifecycle.py new file mode 100644 index 0000000..b6a9484 --- /dev/null +++ b/tests/integration/test_peer_lifecycle.py @@ -0,0 +1,330 @@ +""" +Peer lifecycle integration tests. + +Covers: + - Key generation via API + - Peer creation with various service_access configs + - Iptables rule verification (enforcement layer) + - Peer update โ†’ rules re-applied + - Peer deletion โ†’ rules cleaned up + - Duplicate name rejection + - DNS ACL file updated on peer changes + +Run with: pytest tests/integration/test_peer_lifecycle.py -v +""" +import pytest +import requests +import sys, os +sys.path.insert(0, os.path.dirname(__file__)) +from conftest import API_BASE, peer_rules, iptables_forward, get_live_service_vips + +# Service โ†’ virtual IP mapping (mirrors firewall_manager.SERVICE_IPS) +ALL_SERVICES = {'calendar', 'files', 'mail', 'webdav'} +ALL_PEERS = ('integration-test-full', 'integration-test-restricted', 'integration-test-none') + + +def api_post(path, **kw): + return requests.post(f"{API_BASE}{path}", **kw) + +def api_get(path, **kw): + return requests.get(f"{API_BASE}{path}", **kw) + +def api_put(path, **kw): + return requests.put(f"{API_BASE}{path}", **kw) + +def api_delete(path, **kw): + return requests.delete(f"{API_BASE}{path}", **kw) + + +# --------------------------------------------------------------------------- +# Helper +# --------------------------------------------------------------------------- + +def generate_keys(name: str) -> dict: + r = api_post('/api/wireguard/keys/peer', json={'name': name}) + assert r.status_code == 200, f"Key generation failed: {r.text}" + keys = r.json() + assert 'public_key' in keys and 'private_key' in keys + return keys + + +def get_peer(name: str) -> dict | None: + peers = api_get('/api/peers').json() + return next((p for p in peers if p['peer'] == name), None) + + +def assert_iptables_accept(peer_ip: str, service: str, vips: dict): + """Assert the peer has an ACCEPT rule for the given service VIP.""" + vip = vips[service] + rules = peer_rules(peer_ip) + matching = [r for r in rules if vip in r and 'ACCEPT' in r] + assert matching, ( + f"Expected ACCEPT rule for {service} ({vip}) on peer {peer_ip}.\n" + f"Current rules:\n" + "\n".join(rules) + ) + + +def assert_iptables_drop(peer_ip: str, service: str, vips: dict): + """Assert the peer has a DROP rule for the given service VIP.""" + vip = vips[service] + rules = peer_rules(peer_ip) + matching = [r for r in rules if vip in r and 'DROP' in r] + assert matching, ( + f"Expected DROP rule for {service} ({vip}) on peer {peer_ip}.\n" + f"Current rules:\n" + "\n".join(rules) + ) + + +def get_service_vips() -> dict: + """Return the actual SERVICE_IPS used by the running firewall_manager.""" + return get_live_service_vips() + + +# --------------------------------------------------------------------------- +# Key generation +# --------------------------------------------------------------------------- + +class TestKeyGeneration: + def test_generate_keys_returns_key_pair(self): + keys = generate_keys('integration-test-keygen') + assert len(keys['public_key']) > 20 + assert len(keys['private_key']) > 20 + + def test_generated_keys_are_different(self): + k1 = generate_keys('integration-test-keygen-a') + k2 = generate_keys('integration-test-keygen-b') + assert k1['public_key'] != k2['public_key'] + + +# --------------------------------------------------------------------------- +# Peer with FULL service access +# --------------------------------------------------------------------------- + +class TestPeerFullAccess: + PEER_NAME = 'integration-test-full' + + def test_create_peer_full_access(self): + keys = generate_keys(self.PEER_NAME) + r = api_post('/api/peers', json={ + 'name': self.PEER_NAME, + 'public_key': keys['public_key'], + 'service_access': list(ALL_SERVICES), + }) + assert r.status_code == 201, f"Peer creation failed: {r.text}" + data = r.json() + assert 'ip' in data + assert self.PEER_NAME in data.get('message', '') + + def test_peer_appears_in_list(self): + peer = get_peer(self.PEER_NAME) + assert peer is not None, f"Peer {self.PEER_NAME} not found in /api/peers" + assert set(peer['service_access']) == ALL_SERVICES + + def test_iptables_accept_all_services(self): + peer = get_peer(self.PEER_NAME) + assert peer, "Peer not found" + vips = get_service_vips() + for svc in ALL_SERVICES: + assert_iptables_accept(peer['ip'], svc, vips) + + def test_iptables_has_internet_accept(self): + peer = get_peer(self.PEER_NAME) + rules = peer_rules(peer['ip']) + # The catch-all internet ACCEPT rule has no -d destination in iptables-save format. + # Service rules always have '-d VIP/32'; the internet rule omits -d entirely. + catch_all = [r for r in rules if '-j ACCEPT' in r and '-d ' not in r] + assert catch_all, ( + f"No catch-all ACCEPT rule (internet access) found for {self.PEER_NAME}.\n" + f"Rules:\n" + "\n".join(rules) + ) + + def test_duplicate_peer_name_rejected(self): + keys = generate_keys(self.PEER_NAME + '-dup') + r = api_post('/api/peers', json={ + 'name': self.PEER_NAME, + 'public_key': keys['public_key'], + }) + assert r.status_code == 400, "Duplicate peer should be rejected" + + def test_delete_peer_full_access(self): + r = api_delete(f'/api/peers/{self.PEER_NAME}') + assert r.status_code == 200 + assert get_peer(self.PEER_NAME) is None + + def test_iptables_rules_removed_after_delete(self): + # Peer was deleted in the previous test โ€” rules must be gone + # We don't have the IP cached here, so verify no test-full comment exists + fw = iptables_forward() + comment = f'pic-peer-' + # Build expected comment from peer name (we need the IP โ€” check all lines) + # If the peer is gone, no rules with this peer's typical IP should mention the test name + # We verify by checking no 'integration-test-full' style comment exists + # (Comments use IPs, not names โ€” so just verify the previous peer IP is gone) + # Since we can't get the IP after deletion, we verify the list is clean + remaining = api_get('/api/peers').json() + names = [p['peer'] for p in remaining] + assert self.PEER_NAME not in names + + +# --------------------------------------------------------------------------- +# Peer with RESTRICTED service access (calendar only) +# --------------------------------------------------------------------------- + +class TestPeerRestrictedAccess: + PEER_NAME = 'integration-test-restricted' + + def test_create_peer_restricted_access(self): + keys = generate_keys(self.PEER_NAME) + r = api_post('/api/peers', json={ + 'name': self.PEER_NAME, + 'public_key': keys['public_key'], + 'service_access': ['calendar'], + 'internet_access': False, + }) + assert r.status_code == 201, f"Peer creation failed: {r.text}" + + def test_peer_service_access_stored_correctly(self): + peer = get_peer(self.PEER_NAME) + assert peer is not None + assert peer['service_access'] == ['calendar'] + assert peer.get('internet_access') is False + + def test_iptables_calendar_accepted(self): + peer = get_peer(self.PEER_NAME) + vips = get_service_vips() + assert_iptables_accept(peer['ip'], 'calendar', vips) + + def test_iptables_other_services_dropped(self): + peer = get_peer(self.PEER_NAME) + vips = get_service_vips() + for svc in ('files', 'mail', 'webdav'): + assert_iptables_drop(peer['ip'], svc, vips) + + def test_iptables_no_internet_accept(self): + peer = get_peer(self.PEER_NAME) + rules = peer_rules(peer['ip']) + # internet_access=False โ†’ no catch-all ACCEPT (no -d rule that is ACCEPT) + catch_all_accept = [r for r in rules if '-j ACCEPT' in r and '-d ' not in r] + assert not catch_all_accept, ( + f"internet_access=False peer should not have catch-all ACCEPT.\nRules:\n" + + "\n".join(rules) + ) + + def test_update_peer_add_files_access(self): + r = api_put(f'/api/peers/{self.PEER_NAME}', + json={'service_access': ['calendar', 'files']}) + assert r.status_code == 200 + + def test_iptables_updated_after_service_change(self): + peer = get_peer(self.PEER_NAME) + vips = get_service_vips() + assert_iptables_accept(peer['ip'], 'calendar', vips) + assert_iptables_accept(peer['ip'], 'files', vips) + assert_iptables_drop(peer['ip'], 'mail', vips) + assert_iptables_drop(peer['ip'], 'webdav', vips) + + def test_delete_restricted_peer(self): + peer = get_peer(self.PEER_NAME) + assert peer is not None + peer_ip = peer['ip'] + + r = api_delete(f'/api/peers/{self.PEER_NAME}') + assert r.status_code == 200 + assert get_peer(self.PEER_NAME) is None + + remaining_rules = peer_rules(peer_ip) + assert not remaining_rules, ( + f"Iptables rules remain after deletion of {self.PEER_NAME} ({peer_ip}):\n" + + "\n".join(remaining_rules) + ) + + +# --------------------------------------------------------------------------- +# Peer with NO service access and NO internet +# --------------------------------------------------------------------------- + +class TestPeerNoAccess: + PEER_NAME = 'integration-test-none' + + def test_create_peer_no_access(self): + keys = generate_keys(self.PEER_NAME) + r = api_post('/api/peers', json={ + 'name': self.PEER_NAME, + 'public_key': keys['public_key'], + 'service_access': [], + 'internet_access': False, + 'peer_access': False, + }) + assert r.status_code == 201, f"Peer creation failed: {r.text}" + + def test_peer_stored_with_empty_service_access(self): + peer = get_peer(self.PEER_NAME) + assert peer is not None + assert peer['service_access'] == [] + assert peer.get('internet_access') is False + assert peer.get('peer_access') is False + + def test_iptables_all_services_dropped(self): + peer = get_peer(self.PEER_NAME) + vips = get_service_vips() + for svc in ALL_SERVICES: + assert_iptables_drop(peer['ip'], svc, vips) + + def test_iptables_peer_to_peer_dropped(self): + peer = get_peer(self.PEER_NAME) + rules = peer_rules(peer['ip']) + # peer_access=False โ†’ 10.0.0.0/24 should be DROP + peer_net_drop = [r for r in rules if '10.0.0.0/24' in r and 'DROP' in r] + assert peer_net_drop, ( + f"Expected DROP rule for peer-to-peer traffic on {self.PEER_NAME}\n" + + "\n".join(rules) + ) + + def test_delete_no_access_peer(self): + peer = get_peer(self.PEER_NAME) + assert peer is not None + peer_ip = peer['ip'] + + r = api_delete(f'/api/peers/{self.PEER_NAME}') + assert r.status_code == 200 + + remaining_rules = peer_rules(peer_ip) + assert not remaining_rules, ( + f"Iptables rules remain after deletion ({peer_ip}):\n" + + "\n".join(remaining_rules) + ) + + +# --------------------------------------------------------------------------- +# Concurrent peer registry consistency +# --------------------------------------------------------------------------- + +class TestPeerRegistryConsistency: + def test_peer_ips_are_unique(self): + peers = api_get('/api/peers').json() + ips = [p['ip'] for p in peers] + assert len(ips) == len(set(ips)), f"Duplicate IPs in peer registry: {ips}" + + def test_all_peer_ips_in_wireguard_subnet(self): + import ipaddress + cfg = api_get('/api/config').json() + wg_addr = cfg.get('service_configs', {}).get('wireguard', {}).get('address', '') + if not wg_addr: + pytest.skip("No WireGuard address configured") + network = ipaddress.ip_network(wg_addr, strict=False) + peers = api_get('/api/peers').json() + for peer in peers: + ip_str = peer['ip'].split('/')[0] + ip = ipaddress.ip_address(ip_str) + assert ip in network, ( + f"Peer {peer['peer']} IP {ip_str} is outside WireGuard subnet {network}" + ) + + def test_each_live_peer_has_iptables_rules(self): + peers = api_get('/api/peers').json() + for peer in peers: + rules = peer_rules(peer['ip']) + assert rules, ( + f"Peer {peer['peer']} ({peer['ip']}) has no iptables rules โ€” " + "enforcement is missing" + ) diff --git a/tests/integration/test_webui.py b/tests/integration/test_webui.py new file mode 100644 index 0000000..fd5bdab --- /dev/null +++ b/tests/integration/test_webui.py @@ -0,0 +1,50 @@ +""" +WebUI smoke tests โ€” verify the React app is served correctly. + +These don't test UI interactions (that requires Playwright). +They verify the static serving layer is working and the JS bundle loads. + +Run with: pytest tests/integration/test_webui.py -v +""" +import requests +import sys, os +sys.path.insert(0, os.path.dirname(__file__)) +from conftest import WEBUI_BASE + + +def get(path, **kw): + return requests.get(f"{WEBUI_BASE}{path}", **kw) + + +class TestWebUIServing: + def test_root_returns_200(self): + r = get('/') + assert r.status_code == 200 + + def test_root_is_html(self): + r = get('/') + assert 'text/html' in r.headers.get('Content-Type', '') + + def test_root_contains_react_mount(self): + r = get('/') + assert '
' in r.text, "React mount point not found in index.html" + + def test_root_references_js_bundle(self): + r = get('/') + assert '.js' in r.text, "No JS bundle reference found in index.html" + + def test_spa_routing_fallback(self): + # SPA routes that don't exist as files should still return index.html + for path in ('/peers', '/settings', '/wireguard', '/network'): + r = get(path) + assert r.status_code == 200, f"SPA route {path} returned {r.status_code}" + assert 'text/html' in r.headers.get('Content-Type', ''), \ + f"SPA route {path} didn't return HTML" + + def test_api_reachable_from_webui_origin(self): + # Verify the API is accessible (CORS / proxy config working) + r = requests.get(f"{WEBUI_BASE.rstrip('/')}/api/status".replace( + f':{80}', '').replace('///', '//')) + # The webui container proxies /api โ†’ cell-api, so this should work + # If not proxied, it might 404 โ€” either way it shouldn't be a connection error + assert r.status_code in (200, 404, 301, 302) diff --git a/tests/test_api_endpoints.py b/tests/test_api_endpoints.py index 4d8f0be..d1a8a75 100644 --- a/tests/test_api_endpoints.py +++ b/tests/test_api_endpoints.py @@ -141,17 +141,23 @@ class TestAPIEndpoints(unittest.TestCase): mock_network.add_dhcp_reservation.return_value = True response = self.client.post('/api/dhcp/reservations', data=json.dumps({'ip': '10.0.0.2', 'mac': '00:11:22:33:44:55'}), content_type='application/json') self.assertEqual(response.status_code, 200) - # Simulate error - mock_network.add_dhcp_reservation.side_effect = Exception('fail') + # Missing mac field โ†’ 400, not 500 response = self.client.post('/api/dhcp/reservations', data=json.dumps({'ip': '10.0.0.2'}), content_type='application/json') + self.assertEqual(response.status_code, 400) + # Simulate manager error + mock_network.add_dhcp_reservation.side_effect = Exception('fail') + response = self.client.post('/api/dhcp/reservations', data=json.dumps({'ip': '10.0.0.2', 'mac': '00:11:22:33:44:55'}), content_type='application/json') self.assertEqual(response.status_code, 500) # Mock remove_dhcp_reservation mock_network.remove_dhcp_reservation.return_value = True - response = self.client.delete('/api/dhcp/reservations', data=json.dumps({'ip': '10.0.0.2'}), content_type='application/json') + response = self.client.delete('/api/dhcp/reservations', data=json.dumps({'mac': '00:11:22:33:44:55'}), content_type='application/json') self.assertEqual(response.status_code, 200) - # Simulate error - mock_network.remove_dhcp_reservation.side_effect = Exception('fail') + # Missing mac โ†’ 400 response = self.client.delete('/api/dhcp/reservations', data=json.dumps({'ip': '10.0.0.2'}), content_type='application/json') + self.assertEqual(response.status_code, 400) + # Simulate manager error + mock_network.remove_dhcp_reservation.side_effect = Exception('fail') + response = self.client.delete('/api/dhcp/reservations', data=json.dumps({'mac': '00:11:22:33:44:55'}), content_type='application/json') self.assertEqual(response.status_code, 500) @patch('app.network_manager') diff --git a/tests/test_app_misc.py b/tests/test_app_misc.py index 12f2070..e326921 100644 --- a/tests/test_app_misc.py +++ b/tests/test_app_misc.py @@ -45,7 +45,6 @@ class TestAppMisc(unittest.TestCase): patch.object(app_module, 'calendar_manager', MagicMock()), patch.object(app_module, 'file_manager', MagicMock()), patch.object(app_module, 'routing_manager', MagicMock()), - patch.object(app_module, 'cell_manager', MagicMock()), patch.object(app_module, 'container_manager', MagicMock()), ] for p in self.patches: @@ -97,18 +96,46 @@ class TestAppMisc(unittest.TestCase): self.assertEqual(ctx['path'], '/test') self.assertEqual(ctx['user'], 'user1') - def test_is_local_request(self): - class DummyRequest: - remote_addr = '127.0.0.1' - headers = {} - with patch('app.request', new=DummyRequest()): + def _req(self, remote_addr, xff=''): + class R: + pass + r = R() + r.remote_addr = remote_addr + r.headers = {'X-Forwarded-For': xff} if xff else {} + return r + + def test_is_local_request_loopback(self): + with patch('app.request', new=self._req('127.0.0.1')): self.assertTrue(app_module.is_local_request()) - class DummyRequest2: - remote_addr = '8.8.8.8' - headers = {} - with patch('app.request', new=DummyRequest2()): + + def test_is_local_request_public_ip(self): + with patch('app.request', new=self._req('8.8.8.8')): self.assertFalse(app_module.is_local_request()) + def test_is_local_request_private_ip(self): + with patch('app.request', new=self._req('192.168.1.5')): + self.assertTrue(app_module.is_local_request()) + + def test_is_local_request_xff_spoof_rejected(self): + # Client sends X-Forwarded-For: 127.0.0.1 but actual IP is public + # Old code would trust the first XFF entry โ€” fixed to trust only last + with patch('app.request', new=self._req('8.8.8.8', xff='127.0.0.1, 8.8.8.8')): + self.assertFalse(app_module.is_local_request()) + + def test_is_local_request_xff_last_entry_local(self): + # Caddy appends the real client IP; last entry is local โ†’ allow + with patch('app.request', new=self._req('8.8.8.8', xff='8.8.8.8, 192.168.1.10')): + self.assertTrue(app_module.is_local_request()) + + def test_is_local_request_xff_single_public_rejected(self): + with patch('app.request', new=self._req('8.8.8.8', xff='1.2.3.4')): + self.assertFalse(app_module.is_local_request()) + + def test_is_local_request_cell_network_ip(self): + # 172.20.0.10 is the API container's IP โ€” should be allowed + with patch('app.request', new=self._req('172.20.0.10')): + self.assertTrue(app_module.is_local_request()) + def test_health_check_exception(self): # Patch datetime to raise exception with patch('app.datetime') as mock_dt, app_module.app.app_context(): diff --git a/tests/test_config_validation.py b/tests/test_config_validation.py new file mode 100644 index 0000000..211f596 --- /dev/null +++ b/tests/test_config_validation.py @@ -0,0 +1,174 @@ +""" +Tests for PUT /api/config input validation (400 paths). +These are the highest-risk untested paths: the only server-side guard against +bad subnet/port values entering persistent config. +""" +import json +import sys +import os +import unittest +from unittest.mock import patch, MagicMock + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api')) + + +def _make_client(): + from app import app + app.config['TESTING'] = True + return app.test_client() + + +def _put(client, payload): + return client.put( + '/api/config', + data=json.dumps(payload), + content_type='application/json', + ) + + +# --------------------------------------------------------------------------- +# ip_range validation +# --------------------------------------------------------------------------- + +class TestIpRangeValidation(unittest.TestCase): + + def setUp(self): + self.client = _make_client() + + def test_non_rfc1918_returns_400(self): + r = _put(self.client, {'ip_range': '1.2.3.0/24'}) + self.assertEqual(r.status_code, 400) + body = json.loads(r.data) + self.assertIn('error', body) + self.assertIn('RFC-1918', body['error']) + + def test_172_0_subnet_returns_400(self): + # 172.0.0.0/24 is NOT in 172.16.0.0/12 โ€” was the bug on the dev machine + r = _put(self.client, {'ip_range': '172.0.0.0/24'}) + self.assertEqual(r.status_code, 400) + + def test_172_15_subnet_returns_400(self): + # One prefix below the 172.16.0.0/12 boundary + r = _put(self.client, {'ip_range': '172.15.0.0/24'}) + self.assertEqual(r.status_code, 400) + + def test_172_32_subnet_returns_400(self): + # One prefix above the 172.31.255.255 boundary + r = _put(self.client, {'ip_range': '172.32.0.0/24'}) + self.assertEqual(r.status_code, 400) + + def test_public_ip_returns_400(self): + r = _put(self.client, {'ip_range': '8.8.0.0/16'}) + self.assertEqual(r.status_code, 400) + + def test_172_16_exact_boundary_accepted(self): + # 172.16.0.0/12 is the exact lower boundary โ€” must be valid + r = _put(self.client, {'ip_range': '172.16.0.0/12'}) + # 200 or 202 โ€” just not 400 + self.assertNotEqual(r.status_code, 400) + + def test_10_network_accepted(self): + r = _put(self.client, {'ip_range': '10.0.0.0/8'}) + self.assertNotEqual(r.status_code, 400) + + def test_192_168_network_accepted(self): + r = _put(self.client, {'ip_range': '192.168.0.0/16'}) + self.assertNotEqual(r.status_code, 400) + + def test_invalid_cidr_syntax_returns_400(self): + r = _put(self.client, {'ip_range': 'not-a-cidr'}) + self.assertEqual(r.status_code, 400) + + +# --------------------------------------------------------------------------- +# Port range validation +# --------------------------------------------------------------------------- + +class TestPortValidation(unittest.TestCase): + + def setUp(self): + self.client = _make_client() + + def test_dns_port_zero_returns_400(self): + r = _put(self.client, {'network': {'dns_port': 0}}) + self.assertEqual(r.status_code, 400) + body = json.loads(r.data) + self.assertIn('dns_port', body.get('error', '')) + + def test_dns_port_65536_returns_400(self): + r = _put(self.client, {'network': {'dns_port': 65536}}) + self.assertEqual(r.status_code, 400) + + def test_wireguard_port_zero_returns_400(self): + r = _put(self.client, {'wireguard': {'port': 0}}) + self.assertEqual(r.status_code, 400) + + def test_wireguard_port_65536_returns_400(self): + r = _put(self.client, {'wireguard': {'port': 65536}}) + self.assertEqual(r.status_code, 400) + + def test_wireguard_port_1_accepted(self): + r = _put(self.client, {'wireguard': {'port': 1}}) + self.assertNotEqual(r.status_code, 400) + + def test_wireguard_port_65535_accepted(self): + r = _put(self.client, {'wireguard': {'port': 65535}}) + self.assertNotEqual(r.status_code, 400) + + def test_email_smtp_port_zero_returns_400(self): + r = _put(self.client, {'email': {'smtp_port': 0}}) + self.assertEqual(r.status_code, 400) + + def test_calendar_port_negative_returns_400(self): + r = _put(self.client, {'calendar': {'port': -1}}) + self.assertEqual(r.status_code, 400) + + +# --------------------------------------------------------------------------- +# WireGuard address validation +# --------------------------------------------------------------------------- + +class TestWireguardAddressValidation(unittest.TestCase): + + def setUp(self): + self.client = _make_client() + + def test_bad_wg_address_returns_400(self): + r = _put(self.client, {'wireguard': {'address': 'not-an-ip'}}) + self.assertEqual(r.status_code, 400) + body = json.loads(r.data) + self.assertIn('wireguard.address', body.get('error', '')) + + def test_ip_without_prefix_returns_400(self): + r = _put(self.client, {'wireguard': {'address': '10.0.0.1'}}) + self.assertEqual(r.status_code, 400) + + def test_valid_wg_address_accepted(self): + r = _put(self.client, {'wireguard': {'address': '10.0.0.1/24'}}) + self.assertNotEqual(r.status_code, 400) + + +# --------------------------------------------------------------------------- +# Body validation +# --------------------------------------------------------------------------- + +class TestBodyValidation(unittest.TestCase): + + def setUp(self): + self.client = _make_client() + + def test_no_body_returns_400(self): + r = self.client.put('/api/config', content_type='application/json') + self.assertEqual(r.status_code, 400) + + def test_empty_body_returns_400(self): + r = self.client.put('/api/config', data='', content_type='application/json') + self.assertEqual(r.status_code, 400) + + def test_valid_cell_name_change_returns_200(self): + r = _put(self.client, {'cell_name': 'testcell'}) + self.assertEqual(r.status_code, 200) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_ip_utils_caddyfile.py b/tests/test_ip_utils_caddyfile.py new file mode 100644 index 0000000..3721700 --- /dev/null +++ b/tests/test_ip_utils_caddyfile.py @@ -0,0 +1,102 @@ +""" +Tests for ip_utils.write_caddyfile โ€” this function is called on every +ip_range / domain / cell_name change and was previously untested. +""" +import os +import sys +import tempfile +import unittest + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'api')) + +from ip_utils import write_caddyfile, get_service_ips + + +class TestWriteCaddyfile(unittest.TestCase): + + def setUp(self): + self.tmp = tempfile.mkdtemp() + self.path = os.path.join(self.tmp, 'caddy', 'Caddyfile') + + def _write(self, ip_range='172.20.0.0/16', cell_name='mycell', domain='cell'): + ok = write_caddyfile(ip_range, cell_name, domain, self.path) + self.assertTrue(ok, "write_caddyfile returned False") + with open(self.path) as f: + return f.read() + + def test_creates_file_in_subdirectory(self): + self._write() + self.assertTrue(os.path.isfile(self.path)) + + def test_cell_domain_vhost_present(self): + content = self._write(cell_name='mycell', domain='cell') + self.assertIn('http://mycell.cell', content) + + def test_custom_domain_used(self): + content = self._write(cell_name='pic0', domain='dev') + self.assertIn('http://pic0.dev', content) + self.assertNotIn('mycell', content) + self.assertNotIn('.cell', content) + + def test_service_subdomains_use_domain(self): + content = self._write(domain='mynet') + self.assertIn('http://calendar.mynet', content) + self.assertIn('http://files.mynet', content) + self.assertIn('http://mail.mynet', content) + self.assertIn('http://webdav.mynet', content) + + def test_virtual_ips_match_ip_range(self): + ip_range = '10.0.0.0/16' + content = self._write(ip_range=ip_range) + ips = get_service_ips(ip_range) + self.assertIn(ips['vip_calendar'], content) + self.assertIn(ips['vip_files'], content) + self.assertIn(ips['vip_mail'], content) + self.assertIn(ips['vip_webdav'], content) + + def test_reverse_proxy_targets_are_internal_ports(self): + content = self._write() + self.assertIn('reverse_proxy cell-radicale:5232', content) + self.assertIn('reverse_proxy cell-filegator:8080', content) + self.assertIn('reverse_proxy cell-rainloop:8888', content) + self.assertIn('reverse_proxy cell-webdav:80', content) + + def test_api_proxy_present(self): + content = self._write() + self.assertIn('reverse_proxy cell-api:3000', content) + + def test_overwrite_on_second_call(self): + self._write(cell_name='first', domain='cell') + content = self._write(cell_name='second', domain='cell') + self.assertIn('second.cell', content) + self.assertNotIn('first.cell', content) + + def test_different_ip_ranges_produce_different_vips(self): + c1 = self._write(ip_range='10.0.0.0/16') + os.remove(self.path) + c2 = self._write(ip_range='192.168.1.0/24') + self.assertNotEqual(c1, c2) + + def test_auto_https_off(self): + content = self._write() + self.assertIn('auto_https off', content) + + def test_catchall_block_present(self): + content = self._write() + self.assertIn(':80 {', content) + + def test_invalid_ip_range_returns_false(self): + result = write_caddyfile('not-a-cidr', 'cell', 'cell', self.path) + self.assertFalse(result) + + def test_file_is_not_empty(self): + self._write() + self.assertGreater(os.path.getsize(self.path), 100) + + def tearDown(self): + import shutil + shutil.rmtree(self.tmp, ignore_errors=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/webui/src/pages/Settings.jsx b/webui/src/pages/Settings.jsx index 7223b9f..a84a9f4 100644 --- a/webui/src/pages/Settings.jsx +++ b/webui/src/pages/Settings.jsx @@ -69,14 +69,93 @@ function Section({ icon: Icon, title, children, collapsible = false, defaultOpen ); } +// โ”€โ”€ Validation utilities โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +function isValidPort(v) { + const n = Number(v); + return Number.isInteger(n) && n >= 1 && n <= 65535; +} + +function isValidIp(v) { + if (!v || !v.trim()) return false; + const m = v.trim().match(/^(\d+)\.(\d+)\.(\d+)\.(\d+)$/); + if (!m) return false; + return m.slice(1, 5).map(Number).every(n => n >= 0 && n <= 255); +} + +function isValidIpCidr(v) { + if (!v || !v.trim()) return false; + const m = v.trim().match(/^(\d+)\.(\d+)\.(\d+)\.(\d+)\/(\d+)$/); + if (!m) return false; + const [, a, b, c, d, p] = m.map(Number); + return [a, b, c, d].every(n => n >= 0 && n <= 255) && p >= 0 && p <= 32; +} + +const E_PORT = 'Must be 1โ€“65535'; +const E_IP = 'Must be a valid IP address'; +const E_CIDR = 'Must be a valid IP/CIDR (e.g. 10.0.0.1/24)'; + +function validateServiceConfig(key, data) { + const errors = {}; + const port = (field) => { + if (data[field] !== undefined && data[field] !== '' && !isValidPort(data[field])) + errors[field] = E_PORT; + }; + if (key === 'network') { + port('dns_port'); + if (data.dhcp_range) { + const parts = data.dhcp_range.split(','); + if (parts[0]?.trim() && !isValidIp(parts[0].trim())) + errors.dhcp_range = `Start IP is invalid`; + else if (parts[1]?.trim() && !isValidIp(parts[1].trim())) + errors.dhcp_range = `End IP is invalid`; + } + } + if (key === 'wireguard') { + port('port'); + if (data.address && !isValidIpCidr(data.address)) errors.address = E_CIDR; + } + if (key === 'email') { + port('smtp_port'); port('submission_port'); port('imap_port'); port('webmail_port'); + } + if (key === 'calendar') port('port'); + if (key === 'files') { port('port'); port('manager_port'); } + return errors; +} + +// โ”€โ”€ RFC-1918 validation โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ + +function isRFC1918Cidr(cidr) { + if (!cidr || !cidr.trim()) return false; + const m = cidr.trim().match(/^(\d+)\.(\d+)\.(\d+)\.(\d+)\/(\d+)$/); + if (!m) return false; + const [, a, b, c, d, p] = m.map(Number); + if ([a, b, c, d].some(n => n < 0 || n > 255) || p < 0 || p > 32) return false; + const ip = ((a << 24) | (b << 16) | (c << 8) | d) >>> 0; + const ranges = [ + { net: 0x0a000000, prefix: 8 }, // 10.0.0.0/8 + { net: 0xac100000, prefix: 12 }, // 172.16.0.0/12 + { net: 0xc0a80000, prefix: 16 }, // 192.168.0.0/16 + ]; + for (const { net, prefix } of ranges) { + if (p < prefix) continue; + const mask = (0xffffffff << (32 - prefix)) >>> 0; + if ((ip & mask) >>> 0 === (net & mask) >>> 0) return true; + } + return false; +} + // โ”€โ”€ Field components โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ -function Field({ label, children, hint }) { +function Field({ label, children, hint, error }) { return ( -
- -
{children}
- {hint && {hint}} +
+ +
+ {children} + {error &&

{error}

} +
+ {hint && !error && {hint}}
); } @@ -157,13 +236,13 @@ function TagList({ value = [], onChange, placeholder }) { // โ”€โ”€ Service config forms โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€ -function NetworkForm({ data, onChange }) { +function NetworkForm({ data, onChange, errors = {} }) { return (
- + onChange({ ...data, dns_port: v })} min={1} max={65535} /> - + onChange({ ...data, dhcp_range: v })} placeholder="10.0.0.100,10.0.0.200,12h" /> @@ -173,13 +252,13 @@ function NetworkForm({ data, onChange }) { ); } -function WireguardForm({ data, onChange }) { +function WireguardForm({ data, onChange, errors = {} }) { return (
- + onChange({ ...data, port: v })} min={1} max={65535} /> - + onChange({ ...data, address: v })} placeholder="10.0.0.1/24" /> @@ -189,32 +268,32 @@ function WireguardForm({ data, onChange }) { ); } -function EmailForm({ data, onChange }) { +function EmailForm({ data, onChange, errors = {} }) { return (
onChange({ ...data, domain: v })} placeholder="mail.example.com" /> - + onChange({ ...data, smtp_port: v })} min={1} max={65535} /> - + onChange({ ...data, submission_port: v })} min={1} max={65535} /> - + onChange({ ...data, imap_port: v })} min={1} max={65535} /> - + onChange({ ...data, webmail_port: v })} min={1} max={65535} />
); } -function CalendarForm({ data, onChange }) { +function CalendarForm({ data, onChange, errors = {} }) { return (
- + onChange({ ...data, port: v })} min={1} max={65535} /> @@ -224,13 +303,13 @@ function CalendarForm({ data, onChange }) { ); } -function FilesForm({ data, onChange }) { +function FilesForm({ data, onChange, errors = {} }) { return (
- + onChange({ ...data, port: v })} min={1} max={65535} /> - + onChange({ ...data, manager_port: v })} min={1} max={65535} /> @@ -338,7 +417,12 @@ function Settings() { }; // identity save + const ipRangeError = identity.ip_range && !isRFC1918Cidr(identity.ip_range) + ? 'Must be within an RFC-1918 range: 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16' + : null; + const saveIdentity = async () => { + if (ipRangeError) return; setIdentitySaving(true); try { const res = await cellAPI.updateConfig(identity); @@ -354,6 +438,9 @@ function Settings() { // service config save const saveService = async (key) => { + const { defaults } = SERVICE_DEFS.find((d) => d.key === key) || {}; + const data = { ...(defaults || {}), ...(serviceConfigs[key] || {}) }; + if (Object.keys(validateServiceConfig(key, data)).length > 0) return; setServiceSaving((s) => ({ ...s, [key]: true })); try { const res = await cellAPI.updateConfig({ [key]: serviceConfigs[key] }); @@ -475,7 +562,7 @@ function Settings() { placeholder="cell.local" /> - + { setIdentity((i) => ({ ...i, ip_range: v })); setIdentityDirty(true); }} @@ -486,7 +573,7 @@ function Settings() {
{SERVICE_DEFS.map(({ key, label, icon: Icon, Form, defaults }) => { const data = { ...defaults, ...(serviceConfigs[key] || {}) }; + const errors = validateServiceConfig(key, data); + const hasErrors = Object.keys(errors).length > 0; const dirty = serviceDirty[key]; const saving = serviceSaving[key]; return (
-
updateServiceConfig(key, d)} /> + updateServiceConfig(key, d)} errors={errors} />
Port/directory changes take effect after container restart.