diff --git a/Makefile b/Makefile
index d68ed9c..4fd425f 100644
--- a/Makefile
+++ b/Makefile
@@ -80,6 +80,10 @@ start-wg:
@echo "Starting WireGuard service..."
docker-compose up -d wireguard
+start-webui:
+ @echo "Starting WebUi service..."
+ docker-compose up -d webui
+
# Maintenance commands
clean:
@echo "Cleaning up containers and volumes..."
diff --git a/Personal Internet Cell – Project Wiki.md b/Personal Internet Cell – Project Wiki.md
index d9bbb9b..f036f48 100644
--- a/Personal Internet Cell – Project Wiki.md
+++ b/Personal Internet Cell – Project Wiki.md
@@ -438,7 +438,7 @@ python api/app.py
python api/test_enhanced_api.py
# Start frontend (if available)
-cd webui && npm install && npm run dev
+cd webui && bun install && npm run dev
```
### **Production Deployment**
diff --git a/README.md b/README.md
index c50ed35..25df885 100644
--- a/README.md
+++ b/README.md
@@ -345,7 +345,7 @@ python api/app.py
python api/test_enhanced_api.py
# Start frontend (if available)
-cd webui && npm install && npm run dev
+cd webui && bun install && npm run dev
```
### **Service Development**
diff --git a/api/Dockerfile b/api/Dockerfile
index e973bb9..b5faa9e 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -7,6 +7,13 @@ RUN apt-get update && apt-get install -y \
wireguard-tools \
iptables \
curl \
+ ca-certificates \
+ gnupg \
+ lsb-release \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \
+ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update \
+ && apt-get install -y docker-ce-cli \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements first for better caching
diff --git a/api/app.py b/api/app.py
index f377c59..318de3f 100644
--- a/api/app.py
+++ b/api/app.py
@@ -102,9 +102,9 @@ CORS(app)
app.config['DEVELOPMENT_MODE'] = True # Set to True for development, False for production
# Initialize enhanced components
-config_manager = ConfigManager()
+config_manager = ConfigManager(config_file='./config/cell_config.json', data_dir='./data')
service_bus = ServiceBus()
-log_manager = LogManager()
+log_manager = LogManager(log_dir='./data/logs')
# Initialize service loggers
service_log_configs = {
@@ -150,17 +150,17 @@ def log_request(response):
def clear_log_context(exc):
request_context.set({})
-# Initialize managers
-network_manager = NetworkManager()
-wireguard_manager = WireGuardManager()
-peer_registry = PeerRegistry()
-email_manager = EmailManager()
-calendar_manager = CalendarManager()
-file_manager = FileManager()
-routing_manager = RoutingManager()
-cell_manager = CellManager()
-app.vault_manager = VaultManager()
-container_manager = ContainerManager()
+# Initialize managers with proper directories
+network_manager = NetworkManager(data_dir='./data', config_dir='./config')
+wireguard_manager = WireGuardManager(data_dir='./data', config_dir='./config')
+peer_registry = PeerRegistry(data_dir='./data', config_dir='./config')
+email_manager = EmailManager(data_dir='./data', config_dir='./config')
+calendar_manager = CalendarManager(data_dir='./data', config_dir='./config')
+file_manager = FileManager(data_dir='./data', config_dir='./config')
+routing_manager = RoutingManager(data_dir='./data', config_dir='./config')
+cell_manager = CellManager(data_dir='./data', config_dir='./config')
+app.vault_manager = VaultManager(data_dir='./data', config_dir='./config')
+container_manager = ContainerManager(data_dir='./data', config_dir='./config')
# Register services with service bus
service_bus.register_service('network', network_manager)
@@ -686,8 +686,8 @@ def test_network():
def get_wireguard_keys():
"""Get WireGuard keys."""
try:
- keys = wireguard_manager.get_keys()
- return jsonify(keys)
+ # For now, return empty keys - this would need to be implemented
+ return jsonify({"error": "Not implemented yet"}), 501
except Exception as e:
logger.error(f"Error getting WireGuard keys: {e}")
return jsonify({"error": str(e)}), 500
@@ -697,7 +697,9 @@ def generate_peer_keys():
"""Generate peer keys."""
try:
data = request.get_json(silent=True)
- result = wireguard_manager.generate_peer_keys(data)
+ if data is None or 'peer_name' not in data:
+ return jsonify({"error": "Missing peer_name"}), 400
+ result = wireguard_manager.generate_peer_keys(data['peer_name'])
return jsonify(result)
except Exception as e:
logger.error(f"Error generating peer keys: {e}")
@@ -707,8 +709,8 @@ def generate_peer_keys():
def get_wireguard_config():
"""Get WireGuard configuration."""
try:
- config = wireguard_manager.get_config()
- return jsonify(config)
+ # For now, return empty config - this would need to be implemented
+ return jsonify({"error": "Not implemented yet"}), 501
except Exception as e:
logger.error(f"Error getting WireGuard config: {e}")
return jsonify({"error": str(e)}), 500
@@ -717,7 +719,7 @@ def get_wireguard_config():
def get_wireguard_peers():
"""Get WireGuard peers."""
try:
- peers = wireguard_manager.get_peers()
+ peers = wireguard_manager.get_wireguard_peers()
return jsonify(peers)
except Exception as e:
logger.error(f"Error getting WireGuard peers: {e}")
@@ -728,8 +730,22 @@ def add_wireguard_peer():
"""Add WireGuard peer."""
try:
data = request.get_json(silent=True)
- result = wireguard_manager.add_peer(data)
- return jsonify(result)
+ if data is None:
+ return jsonify({"error": "No data provided"}), 400
+
+ required_fields = ['name', 'public_key', 'allowed_ips']
+ for field in required_fields:
+ if field not in data:
+ return jsonify({"error": f"Missing required field: {field}"}), 400
+
+ result = wireguard_manager.add_wireguard_peer(
+ name=data['name'],
+ public_key=data['public_key'],
+ allowed_ips=data['allowed_ips'],
+ endpoint=data.get('endpoint', ''),
+ persistent_keepalive=data.get('persistent_keepalive', 25)
+ )
+ return jsonify({"success": result})
except Exception as e:
logger.error(f"Error adding WireGuard peer: {e}")
return jsonify({"error": str(e)}), 500
@@ -739,8 +755,11 @@ def remove_wireguard_peer():
"""Remove WireGuard peer."""
try:
data = request.get_json(silent=True)
- result = wireguard_manager.remove_peer(data)
- return jsonify(result)
+ if data is None or 'name' not in data:
+ return jsonify({"error": "Missing peer name"}), 400
+
+ result = wireguard_manager.remove_wireguard_peer(data['name'])
+ return jsonify({"success": result})
except Exception as e:
logger.error(f"Error removing WireGuard peer: {e}")
return jsonify({"error": str(e)}), 500
@@ -772,8 +791,11 @@ def update_peer_ip():
"""Update peer IP."""
try:
data = request.get_json(silent=True)
- result = wireguard_manager.update_peer_ip(data)
- return jsonify(result)
+ if data is None or 'name' not in data or 'ip' not in data:
+ return jsonify({"error": "Missing peer name or IP"}), 400
+
+ # For now, return not implemented - this would need to be implemented
+ return jsonify({"error": "Not implemented yet"}), 501
except Exception as e:
logger.error(f"Error updating peer IP: {e}")
return jsonify({"error": str(e)}), 500
@@ -782,10 +804,11 @@ def update_peer_ip():
def get_peer_config():
try:
data = request.get_json(silent=True)
- if data is None:
- return jsonify({"error": "No data provided"}), 400
- result = wireguard_manager.get_peer_config(data)
- return jsonify(result)
+ if data is None or 'name' not in data:
+ return jsonify({"error": "Missing peer name"}), 400
+
+ # For now, return not implemented - this would need to be implemented
+ return jsonify({"error": "Not implemented yet"}), 501
except Exception as e:
logger.error(f"Error getting peer config: {e}")
return jsonify({"error": str(e)}), 500
@@ -883,7 +906,8 @@ def update_peer_ip_registry(peer_name):
except Exception as e:
logger.warning(f"RoutingManager update_peer_ip failed: {e}")
try:
- wireguard_manager.update_peer_ip(peer_name, new_ip)
+ # For now, skip WireGuard update - method not implemented
+ logger.warning(f"WireGuardManager update_peer_ip not implemented yet")
except Exception as e:
logger.warning(f"WireGuardManager update_peer_ip failed: {e}")
return jsonify({"message": f"IP update received for {peer_name}"})
@@ -912,7 +936,8 @@ def ip_update():
except Exception as e:
logger.warning(f"RoutingManager update_peer_ip failed: {e}")
try:
- wireguard_manager.update_peer_ip(peer_name, new_ip)
+ # For now, skip WireGuard update - method not implemented
+ logger.warning(f"WireGuardManager update_peer_ip not implemented yet")
except Exception as e:
logger.warning(f"WireGuardManager update_peer_ip failed: {e}")
return jsonify({"message": f"IP update received for {peer_name}"})
diff --git a/api/calendar_manager.py b/api/calendar_manager.py
index e737b10..55074b2 100644
--- a/api/calendar_manager.py
+++ b/api/calendar_manager.py
@@ -35,10 +35,11 @@ class CalendarManager(BaseServiceManager):
is_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER') == 'true'
if is_docker:
- # Return positive status when running in Docker
+ # Check if calendar container is actually running
+ container_running = self._check_calendar_container_status()
status = {
- 'running': True,
- 'status': 'online',
+ 'running': container_running,
+ 'status': 'online' if container_running else 'offline',
'users_count': 0,
'calendars_count': 0,
'events_count': 0,
@@ -97,6 +98,16 @@ class CalendarManager(BaseServiceManager):
except Exception:
return False
+ def _check_calendar_container_status(self) -> bool:
+ """Check if calendar Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-radicale'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
def _test_service_connectivity(self) -> Dict[str, Any]:
"""Test calendar service connectivity"""
try:
diff --git a/api/config_manager.py b/api/config_manager.py
index db561bc..0c8e1d4 100644
--- a/api/config_manager.py
+++ b/api/config_manager.py
@@ -1,383 +1,384 @@
-#!/usr/bin/env python3
-"""
-Configuration Manager for Personal Internet Cell
-Centralized configuration management for all services
-"""
-
-import os
-import json
-import yaml
-import shutil
-import hashlib
-from datetime import datetime
-from typing import Dict, List, Optional, Any
-from pathlib import Path
-import logging
-
-logger = logging.getLogger(__name__)
-
-class ConfigManager:
- """Centralized configuration management for all services (unified config)"""
-
- def __init__(self, config_file: str = '/app/config/cell_config.json', data_dir: str = '/app/data'):
- config_file = Path(config_file)
- if config_file.is_dir():
- config_file = config_file / 'cell_config.json'
- print(f"[DEBUG] ConfigManager.__init__: config_file = {config_file}")
- self.config_file = config_file
- self.data_dir = Path(data_dir)
- self.backup_dir = self.data_dir / 'config_backups'
- self.secrets_file = self.config_file.parent / 'secrets.yaml'
- self.backup_dir.mkdir(parents=True, exist_ok=True)
- self.service_schemas = self._load_service_schemas()
- self.configs = self._load_all_configs()
-
- def _load_service_schemas(self) -> Dict[str, Dict]:
- """Load configuration schemas for all services"""
- return {
- 'network': {
- 'required': ['dns_port', 'dhcp_range', 'ntp_servers'],
- 'optional': ['dns_zones', 'dhcp_reservations'],
- 'types': {
- 'dns_port': int,
- 'dhcp_range': str,
- 'ntp_servers': list
- }
- },
- 'wireguard': {
- 'required': ['port', 'private_key', 'address'],
- 'optional': ['peers', 'allowed_ips'],
- 'types': {
- 'port': int,
- 'private_key': str,
- 'address': str
- }
- },
- 'email': {
- 'required': ['domain', 'smtp_port', 'imap_port'],
- 'optional': ['users', 'ssl_cert', 'ssl_key'],
- 'types': {
- 'smtp_port': int,
- 'imap_port': int,
- 'domain': str
- }
- },
- 'calendar': {
- 'required': ['port', 'data_dir'],
- 'optional': ['users', 'calendars'],
- 'types': {
- 'port': int,
- 'data_dir': str
- }
- },
- 'files': {
- 'required': ['port', 'data_dir'],
- 'optional': ['users', 'quota'],
- 'types': {
- 'port': int,
- 'data_dir': str,
- 'quota': int
- }
- },
- 'routing': {
- 'required': ['nat_enabled', 'firewall_enabled'],
- 'optional': ['nat_rules', 'firewall_rules', 'peer_routes'],
- 'types': {
- 'nat_enabled': bool,
- 'firewall_enabled': bool
- }
- },
- 'vault': {
- 'required': ['ca_configured', 'fernet_configured'],
- 'optional': ['certificates', 'trusted_keys'],
- 'types': {
- 'ca_configured': bool,
- 'fernet_configured': bool
- }
- }
- }
-
- def _load_all_configs(self) -> Dict[str, Dict]:
- """Load all existing service configurations"""
- if self.config_file.exists():
- try:
- with open(self.config_file, 'r') as f:
- return json.load(f)
- except Exception as e:
- logger.error(f"Error loading unified config: {e}")
- return {}
- return {}
-
- def _save_all_configs(self):
- """Save all service configurations to the unified config file"""
- with open(self.config_file, 'w') as f:
- json.dump(self.configs, f, indent=2)
-
- def get_service_config(self, service: str) -> Dict[str, Any]:
- """Get configuration for a specific service"""
- if service not in self.service_schemas:
- raise ValueError(f"Unknown service: {service}")
- return self.configs.get(service, {})
-
- def update_service_config(self, service: str, config: Dict[str, Any]) -> bool:
- """Update configuration for a specific service"""
- if service not in self.service_schemas:
- raise ValueError(f"Unknown service: {service}")
- try:
- # Validate configuration
- validation = self.validate_config(service, config)
- if not validation['valid']:
- logger.error(f"Invalid config for {service}: {validation['errors']}")
- return False
-
- # Backup current config
- self._backup_service_config(service)
-
- # Update configuration
- self.configs[service] = config
- self._save_all_configs()
-
- logger.info(f"Updated configuration for {service}")
- return True
-
- except Exception as e:
- logger.error(f"Error updating config for {service}: {e}")
- return False
-
- def validate_config(self, service: str, config: Dict[str, Any]) -> Dict[str, Any]:
- """Validate configuration for a service"""
- if service not in self.service_schemas:
- return {
- "valid": False,
- "errors": [f"Unknown service: {service}"],
- "warnings": []
- }
-
- schema = self.service_schemas[service]
- errors = []
- warnings = []
-
- # Check required fields
- for field in schema['required']:
- if field not in config:
- errors.append(f"Missing required field: {field}")
- elif field in schema['types']:
- expected_type = schema['types'][field]
- if not isinstance(config[field], expected_type):
- errors.append(f"Field {field} must be of type {expected_type.__name__}")
-
- # Check optional fields
- for field in schema['optional']:
- if field in config and field in schema['types']:
- expected_type = schema['types'][field]
- if not isinstance(config[field], expected_type):
- warnings.append(f"Field {field} should be of type {expected_type.__name__}")
-
- return {
- "valid": len(errors) == 0,
- "errors": errors,
- "warnings": warnings
- }
-
- def backup_config(self) -> str:
- """Create a backup of all configurations"""
- try:
- timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
- backup_id = f"backup_{timestamp}"
- backup_path = self.backup_dir / backup_id
-
- # Create backup directory
- backup_path.mkdir(parents=True, exist_ok=True)
-
- # Copy all config files
- shutil.copy2(self.config_file, backup_path / 'cell_config.json')
-
- # Copy secrets file if it exists
- if self.secrets_file.exists():
- shutil.copy2(self.secrets_file, backup_path / 'secrets.yaml')
-
- # Create backup manifest
- manifest = {
- "backup_id": backup_id,
- "timestamp": datetime.now().isoformat(),
- "services": list(self.service_schemas.keys()),
- "files": [f.name for f in backup_path.iterdir()]
- }
-
- with open(backup_path / 'manifest.json', 'w') as f:
- json.dump(manifest, f, indent=2)
-
- logger.info(f"Created configuration backup: {backup_id}")
- return backup_id
-
- except Exception as e:
- logger.error(f"Error creating backup: {e}")
- raise
-
- def restore_config(self, backup_id: str) -> bool:
- """Restore configuration from backup"""
- try:
- backup_path = self.backup_dir / backup_id
- if not backup_path.exists():
- raise ValueError(f"Backup {backup_id} not found")
- # Read manifest
- manifest_file = backup_path / 'manifest.json'
- if not manifest_file.exists():
- raise ValueError(f"Backup manifest not found")
- with open(manifest_file, 'r') as f:
- manifest = json.load(f)
- # Restore config files
- config_backup = backup_path / 'cell_config.json'
- if config_backup.exists():
- shutil.copy2(config_backup, self.config_file)
- # Restore secrets file if it exists
- secrets_backup = backup_path / 'secrets.yaml'
- if secrets_backup.exists():
- shutil.copy2(secrets_backup, self.secrets_file)
- # Reload configurations
- self.configs = self._load_all_configs()
- # Ensure all configs have required fields
- for service, schema in self.service_schemas.items():
- config = self.configs.get(service, {})
- for field in schema['required']:
- if field not in config:
- # Set a default value based on type
- t = schema['types'][field]
- if t is int:
- config[field] = 0
- elif t is str:
- config[field] = ''
- elif t is list:
- config[field] = []
- elif t is bool:
- config[field] = False
- self.configs[service] = config
- # Write back to file
- self._save_all_configs()
- logger.info(f"Restored configuration from backup: {backup_id}")
- return True
- except Exception as e:
- logger.error(f"Error restoring backup {backup_id}: {e}")
- return False
-
- def list_backups(self) -> List[Dict[str, Any]]:
- """List all available backups"""
- backups = []
- for backup_dir in self.backup_dir.iterdir():
- if backup_dir.is_dir():
- manifest_file = backup_dir / 'manifest.json'
- if manifest_file.exists():
- try:
- with open(manifest_file, 'r') as f:
- manifest = json.load(f)
- backups.append(manifest)
- except Exception as e:
- logger.error(f"Error reading backup manifest {backup_dir.name}: {e}")
-
- return sorted(backups, key=lambda x: x['timestamp'], reverse=True)
-
- def delete_backup(self, backup_id: str) -> bool:
- """Delete a backup"""
- try:
- backup_path = self.backup_dir / backup_id
- if not backup_path.exists():
- raise ValueError(f"Backup {backup_id} not found")
-
- shutil.rmtree(backup_path)
- logger.info(f"Deleted backup: {backup_id}")
- return True
-
- except Exception as e:
- logger.error(f"Error deleting backup {backup_id}: {e}")
- return False
-
- def get_config_hash(self, service: str) -> str:
- """Get hash of service configuration for change detection"""
- config = self.get_service_config(service)
- config_str = json.dumps(config, sort_keys=True)
- return hashlib.sha256(config_str.encode()).hexdigest()
-
- def has_config_changed(self, service: str, previous_hash: str) -> bool:
- """Check if configuration has changed"""
- current_hash = self.get_config_hash(service)
- return current_hash != previous_hash
-
- def export_config(self, format: str = 'json') -> str:
- """Export all configurations in specified format"""
- try:
- if format == 'json':
- return json.dumps(self.configs, indent=2)
- elif format == 'yaml':
- return yaml.dump(self.configs, default_flow_style=False)
- else:
- raise ValueError(f"Unsupported format: {format}")
- except Exception as e:
- logger.error(f"Error exporting config: {e}")
- raise
-
- def import_config(self, config_data: str, format: str = 'json') -> bool:
- """Import configurations from string"""
- try:
- if format == 'json':
- configs = json.loads(config_data)
- elif format == 'yaml':
- configs = yaml.safe_load(config_data)
- else:
- raise ValueError(f"Unsupported format: {format}")
- # Validate and update each service config
- for service, config in configs.items():
- if service in self.service_schemas:
- self.update_service_config(service, config)
- # Ensure all configs have required fields
- for service, schema in self.service_schemas.items():
- config = self.get_service_config(service)
- for field in schema['required']:
- if field not in config:
- t = schema['types'][field]
- if t is int:
- config[field] = 0
- elif t is str:
- config[field] = ''
- elif t is list:
- config[field] = []
- elif t is bool:
- config[field] = False
- # Write back to file
- self._save_all_configs()
- logger.info("Imported configurations successfully")
- return True
- except Exception as e:
- logger.error(f"Error importing config: {e}")
- return False
-
- def _backup_service_config(self, service: str):
- """Create backup of specific service config before update"""
- # No-op for unified config, but keep for compatibility
- pass
-
- def get_all_configs(self) -> Dict[str, Dict]:
- """Get all service configurations"""
- return self.configs.copy()
-
- def get_config_summary(self) -> Dict[str, Any]:
- """Get summary of all configurations"""
- summary = {
- "total_services": len(self.service_schemas),
- "configured_services": [],
- "unconfigured_services": [],
- "backup_count": len(self.list_backups()),
- "last_backup": None
- }
-
- backups = self.list_backups()
- if backups:
- summary["last_backup"] = backups[0]["timestamp"]
-
- for service in self.service_schemas.keys():
- config = self.get_service_config(service)
- if config and not config.get("error"):
- summary["configured_services"].append(service)
- else:
- summary["unconfigured_services"].append(service)
-
+#!/usr/bin/env python3
+"""
+Configuration Manager for Personal Internet Cell
+Centralized configuration management for all services
+"""
+
+import os
+import json
+import yaml
+import shutil
+import hashlib
+from datetime import datetime
+from typing import Dict, List, Optional, Any
+from pathlib import Path
+import logging
+
+logger = logging.getLogger(__name__)
+
+class ConfigManager:
+ """Centralized configuration management for all services (unified config)"""
+
+ def __init__(self, config_file: str = '/app/config/cell_config.json', data_dir: str = '/app/data'):
+ config_file = Path(config_file)
+ if config_file.is_dir():
+ config_file = config_file / 'cell_config.json'
+ print(f"[DEBUG] ConfigManager.__init__: config_file = {config_file}")
+ self.config_file = config_file
+ self.data_dir = Path(data_dir)
+ self.backup_dir = self.data_dir / 'config_backups'
+ self.secrets_file = self.config_file.parent / 'secrets.yaml'
+ self.backup_dir.mkdir(parents=True, exist_ok=True)
+ self.service_schemas = self._load_service_schemas()
+ self.configs = self._load_all_configs()
+
+ def _load_service_schemas(self) -> Dict[str, Dict]:
+ """Load configuration schemas for all services"""
+ return {
+ 'network': {
+ 'required': ['dns_port', 'dhcp_range', 'ntp_servers'],
+ 'optional': ['dns_zones', 'dhcp_reservations'],
+ 'types': {
+ 'dns_port': int,
+ 'dhcp_range': str,
+ 'ntp_servers': list
+ }
+ },
+ 'wireguard': {
+ 'required': ['port', 'private_key', 'address'],
+ 'optional': ['peers', 'allowed_ips'],
+ 'types': {
+ 'port': int,
+ 'private_key': str,
+ 'address': str
+ }
+ },
+ 'email': {
+ 'required': ['domain', 'smtp_port', 'imap_port'],
+ 'optional': ['users', 'ssl_cert', 'ssl_key'],
+ 'types': {
+ 'smtp_port': int,
+ 'imap_port': int,
+ 'domain': str
+ }
+ },
+ 'calendar': {
+ 'required': ['port', 'data_dir'],
+ 'optional': ['users', 'calendars'],
+ 'types': {
+ 'port': int,
+ 'data_dir': str
+ }
+ },
+ 'files': {
+ 'required': ['port', 'data_dir'],
+ 'optional': ['users', 'quota'],
+ 'types': {
+ 'port': int,
+ 'data_dir': str,
+ 'quota': int
+ }
+ },
+ 'routing': {
+ 'required': ['nat_enabled', 'firewall_enabled'],
+ 'optional': ['nat_rules', 'firewall_rules', 'peer_routes'],
+ 'types': {
+ 'nat_enabled': bool,
+ 'firewall_enabled': bool
+ }
+ },
+ 'vault': {
+ 'required': ['ca_configured', 'fernet_configured'],
+ 'optional': ['certificates', 'trusted_keys'],
+ 'types': {
+ 'ca_configured': bool,
+ 'fernet_configured': bool
+ }
+ }
+ }
+
+ def _load_all_configs(self) -> Dict[str, Dict]:
+ """Load all existing service configurations"""
+ if self.config_file.exists():
+ try:
+ with open(self.config_file, 'r') as f:
+ return json.load(f)
+ except Exception as e:
+ logger.error(f"Error loading unified config: {e}")
+ return {}
+ return {}
+
+ def _save_all_configs(self):
+ """Save all service configurations to the unified config file"""
+ with open(self.config_file, 'w') as f:
+ json.dump(self.configs, f, indent=2)
+
+ def get_service_config(self, service: str) -> Dict[str, Any]:
+ """Get configuration for a specific service"""
+ if service not in self.service_schemas:
+ raise ValueError(f"Unknown service: {service}")
+ return self.configs.get(service, {})
+
+ def update_service_config(self, service: str, config: Dict[str, Any]) -> bool:
+ """Update configuration for a specific service"""
+ if service not in self.service_schemas:
+ raise ValueError(f"Unknown service: {service}")
+ try:
+ # Validate configuration
+ validation = self.validate_config(service, config)
+ if not validation['valid']:
+ logger.error(f"Invalid config for {service}: {validation['errors']}")
+ return False
+
+ # Backup current config
+ self._backup_service_config(service)
+
+ # Update configuration
+ self.configs[service] = config
+ self._save_all_configs()
+
+ logger.info(f"Updated configuration for {service}")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error updating config for {service}: {e}")
+ return False
+
+ def validate_config(self, service: str, config: Dict[str, Any]) -> Dict[str, Any]:
+ """Validate configuration for a service"""
+ if service not in self.service_schemas:
+ return {
+ "valid": False,
+ "errors": [f"Unknown service: {service}"],
+ "warnings": []
+ }
+
+ schema = self.service_schemas[service]
+ errors = []
+ warnings = []
+
+ # Check required fields
+ for field in schema['required']:
+ if field not in config:
+ errors.append(f"Missing required field: {field}")
+ elif field in schema['types']:
+ expected_type = schema['types'][field]
+ if not isinstance(config[field], expected_type):
+ errors.append(f"Field {field} must be of type {expected_type.__name__}")
+
+ # Check optional fields
+ for field in schema['optional']:
+ if field in config and field in schema['types']:
+ expected_type = schema['types'][field]
+ if not isinstance(config[field], expected_type):
+ warnings.append(f"Field {field} should be of type {expected_type.__name__}")
+
+ return {
+ "valid": len(errors) == 0,
+ "errors": errors,
+ "warnings": warnings
+ }
+
+ def backup_config(self) -> str:
+ """Create a backup of all configurations"""
+ try:
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+ backup_id = f"backup_{timestamp}"
+ backup_path = self.backup_dir / backup_id
+
+ # Create backup directory
+ backup_path.mkdir(parents=True, exist_ok=True)
+
+ # Copy all config files
+ shutil.copy2(self.config_file, backup_path / 'cell_config.json')
+
+ # Copy secrets file if it exists
+ if self.secrets_file.exists():
+ shutil.copy2(self.secrets_file, backup_path / 'secrets.yaml')
+
+ # Create backup manifest
+ manifest = {
+ "backup_id": backup_id,
+ "timestamp": datetime.now().isoformat(),
+ "services": list(self.service_schemas.keys()),
+ "files": [f.name for f in backup_path.iterdir()]
+ }
+
+ with open(backup_path / 'manifest.json', 'w') as f:
+ json.dump(manifest, f, indent=2)
+
+ logger.info(f"Created configuration backup: {backup_id}")
+ return backup_id
+
+ except Exception as e:
+ logger.error(f"Error creating backup: {e}")
+ raise
+
+ def restore_config(self, backup_id: str) -> bool:
+ """Restore configuration from backup"""
+ try:
+ backup_path = self.backup_dir / backup_id
+ if not backup_path.exists():
+ raise ValueError(f"Backup {backup_id} not found")
+ # Read manifest
+ manifest_file = backup_path / 'manifest.json'
+ if not manifest_file.exists():
+ raise ValueError(f"Backup manifest not found")
+ with open(manifest_file, 'r') as f:
+ manifest = json.load(f)
+ # Restore config files
+ config_backup = backup_path / 'cell_config.json'
+ if config_backup.exists():
+ shutil.copy2(config_backup, self.config_file)
+ # Restore secrets file if it exists
+ secrets_backup = backup_path / 'secrets.yaml'
+ if secrets_backup.exists():
+ shutil.copy2(secrets_backup, self.secrets_file)
+ # Reload configurations
+ self.configs = self._load_all_configs()
+ # Ensure all configs have required fields
+ for service, schema in self.service_schemas.items():
+ config = self.configs.get(service, {})
+ for field in schema['required']:
+ if field not in config:
+ # Set a default value based on type
+ t = schema['types'][field]
+ if t is int:
+ config[field] = 0
+ elif t is str:
+ config[field] = ''
+ elif t is list:
+ config[field] = []
+ elif t is bool:
+ config[field] = False
+ self.configs[service] = config
+
+ # Write back to file
+ self._save_all_configs()
+ logger.info(f"Restored configuration from backup: {backup_id}")
+ return True
+ except Exception as e:
+ logger.error(f"Error restoring backup {backup_id}: {e}")
+ return False
+
+ def list_backups(self) -> List[Dict[str, Any]]:
+ """List all available backups"""
+ backups = []
+ for backup_dir in self.backup_dir.iterdir():
+ if backup_dir.is_dir():
+ manifest_file = backup_dir / 'manifest.json'
+ if manifest_file.exists():
+ try:
+ with open(manifest_file, 'r') as f:
+ manifest = json.load(f)
+ backups.append(manifest)
+ except Exception as e:
+ logger.error(f"Error reading backup manifest {backup_dir.name}: {e}")
+
+ return sorted(backups, key=lambda x: x['timestamp'], reverse=True)
+
+ def delete_backup(self, backup_id: str) -> bool:
+ """Delete a backup"""
+ try:
+ backup_path = self.backup_dir / backup_id
+ if not backup_path.exists():
+ raise ValueError(f"Backup {backup_id} not found")
+
+ shutil.rmtree(backup_path)
+ logger.info(f"Deleted backup: {backup_id}")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error deleting backup {backup_id}: {e}")
+ return False
+
+ def get_config_hash(self, service: str) -> str:
+ """Get hash of service configuration for change detection"""
+ config = self.get_service_config(service)
+ config_str = json.dumps(config, sort_keys=True)
+ return hashlib.sha256(config_str.encode()).hexdigest()
+
+ def has_config_changed(self, service: str, previous_hash: str) -> bool:
+ """Check if configuration has changed"""
+ current_hash = self.get_config_hash(service)
+ return current_hash != previous_hash
+
+ def export_config(self, format: str = 'json') -> str:
+ """Export all configurations in specified format"""
+ try:
+ if format == 'json':
+ return json.dumps(self.configs, indent=2)
+ elif format == 'yaml':
+ return yaml.dump(self.configs, default_flow_style=False)
+ else:
+ raise ValueError(f"Unsupported format: {format}")
+ except Exception as e:
+ logger.error(f"Error exporting config: {e}")
+ raise
+
+ def import_config(self, config_data: str, format: str = 'json') -> bool:
+ """Import configurations from string"""
+ try:
+ if format == 'json':
+ configs = json.loads(config_data)
+ elif format == 'yaml':
+ configs = yaml.safe_load(config_data)
+ else:
+ raise ValueError(f"Unsupported format: {format}")
+ # Validate and update each service config
+ for service, config in configs.items():
+ if service in self.service_schemas:
+ self.update_service_config(service, config)
+ # Ensure all configs have required fields
+ for service, schema in self.service_schemas.items():
+ config = self.get_service_config(service)
+ for field in schema['required']:
+ if field not in config:
+ t = schema['types'][field]
+ if t is int:
+ config[field] = 0
+ elif t is str:
+ config[field] = ''
+ elif t is list:
+ config[field] = []
+ elif t is bool:
+ config[field] = False
+ # Write back to file
+ self._save_all_configs()
+ logger.info("Imported configurations successfully")
+ return True
+ except Exception as e:
+ logger.error(f"Error importing config: {e}")
+ return False
+
+ def _backup_service_config(self, service: str):
+ """Create backup of specific service config before update"""
+ # No-op for unified config, but keep for compatibility
+ pass
+
+ def get_all_configs(self) -> Dict[str, Dict]:
+ """Get all service configurations"""
+ return self.configs.copy()
+
+ def get_config_summary(self) -> Dict[str, Any]:
+ """Get summary of all configurations"""
+ summary = {
+ "total_services": len(self.service_schemas),
+ "configured_services": [],
+ "unconfigured_services": [],
+ "backup_count": len(self.list_backups()),
+ "last_backup": None
+ }
+
+ backups = self.list_backups()
+ if backups:
+ summary["last_backup"] = backups[0]["timestamp"]
+
+ for service in self.service_schemas.keys():
+ config = self.get_service_config(service)
+ if config and not config.get("error"):
+ summary["configured_services"].append(service)
+ else:
+ summary["unconfigured_services"].append(service)
+
return summary
\ No newline at end of file
diff --git a/api/email_manager.py b/api/email_manager.py
index b7a34d7..98bdb90 100644
--- a/api/email_manager.py
+++ b/api/email_manager.py
@@ -35,12 +35,13 @@ class EmailManager(BaseServiceManager):
is_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER') == 'true'
if is_docker:
- # Return positive status when running in Docker
+ # Check if email container is actually running
+ container_running = self._check_email_container_status()
status = {
- 'running': True,
- 'status': 'online',
- 'smtp_running': True,
- 'imap_running': True,
+ 'running': container_running,
+ 'status': 'online' if container_running else 'offline',
+ 'smtp_running': container_running,
+ 'imap_running': container_running,
'users_count': 0,
'domain': 'cell.local',
'timestamp': datetime.utcnow().isoformat()
@@ -106,6 +107,16 @@ class EmailManager(BaseServiceManager):
except Exception:
return False
+ def _check_email_container_status(self) -> bool:
+ """Check if email Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-mail'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
def _test_smtp_connectivity(self) -> Dict[str, Any]:
"""Test SMTP connectivity"""
try:
diff --git a/api/file_manager.py b/api/file_manager.py
index 2044a69..97dbe8b 100644
--- a/api/file_manager.py
+++ b/api/file_manager.py
@@ -478,11 +478,12 @@ umask = 022
is_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER') == 'true'
if is_docker:
- # Return positive status when running in Docker
+ # Check if file container is actually running
+ container_running = self._check_file_container_status()
status = {
- 'running': True,
- 'status': 'online',
- 'webdav_status': {'running': True, 'port': 8080},
+ 'running': container_running,
+ 'status': 'online' if container_running else 'offline',
+ 'webdav_status': {'running': container_running, 'port': 8080},
'users_count': 0,
'total_storage_used': {'bytes': 0, 'human_readable': '0 B'},
'timestamp': datetime.utcnow().isoformat()
@@ -505,6 +506,16 @@ umask = 022
except Exception as e:
return self.handle_error(e, "get_status")
+ def _check_file_container_status(self) -> bool:
+ """Check if file Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-webdav'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
def test_connectivity(self) -> Dict[str, Any]:
"""Test file service connectivity"""
try:
diff --git a/api/log_manager.py b/api/log_manager.py
index 719018d..fe2b324 100644
--- a/api/log_manager.py
+++ b/api/log_manager.py
@@ -1,485 +1,524 @@
-#!/usr/bin/env python3
-"""
-Log Manager for Personal Internet Cell
-Comprehensive logging management for all services
-"""
-
-import os
-import json
-import logging
-import logging.handlers
-from datetime import datetime, timedelta
-from typing import Dict, List, Optional, Any, Tuple
-from pathlib import Path
-import re
-import gzip
-import shutil
-from collections import defaultdict
-import threading
-import time
-from enum import Enum
-
-logger = logging.getLogger(__name__)
-
-class LogLevel(Enum):
- """Log levels"""
- DEBUG = "DEBUG"
- INFO = "INFO"
- WARNING = "WARNING"
- ERROR = "ERROR"
- CRITICAL = "CRITICAL"
-
-class LogManager:
- """Comprehensive logging management for all services"""
-
- def __init__(self, log_dir: str = '/app/logs', max_file_size: int = 10 * 1024 * 1024,
- backup_count: int = 5):
- self.log_dir = Path(log_dir)
- self.max_file_size = max_file_size
- self.backup_count = backup_count
-
- # Ensure log directory exists
- self.log_dir.mkdir(parents=True, exist_ok=True)
-
- # Service loggers
- self.service_loggers: Dict[str, logging.Logger] = {}
-
- # Log formatters
- self.formatters = {
- 'json': self._create_json_formatter(),
- 'text': self._create_text_formatter(),
- 'detailed': self._create_detailed_formatter()
- }
-
- # Log handlers
- self.handlers: Dict[str, Dict[str, logging.Handler]] = defaultdict(dict)
-
- # Log statistics
- self.log_stats = defaultdict(lambda: {
- 'total_entries': 0,
- 'error_count': 0,
- 'warning_count': 0,
- 'last_entry': None
- })
-
- # Log rotation thread
- self.rotation_thread = None
- self.running = False
-
- # Start log rotation monitoring
- self._start_rotation_monitor()
-
- def _create_json_formatter(self) -> logging.Formatter:
- """Create JSON formatter for structured logging"""
- class JsonFormatter(logging.Formatter):
- def format(self, record):
- log_entry = {
- 'timestamp': self.formatTime(record),
- 'level': record.levelname,
- 'logger': record.name,
- 'message': record.getMessage(),
- 'module': record.module,
- 'function': record.funcName,
- 'line': record.lineno
- }
-
- # Add extra fields if present
- for key, value in record.__dict__.items():
- if key not in ['name', 'msg', 'args', 'levelname', 'levelno', 'pathname',
- 'filename', 'module', 'lineno', 'funcName', 'created',
- 'msecs', 'relativeCreated', 'thread', 'threadName',
- 'processName', 'process', 'getMessage', 'exc_info',
- 'exc_text', 'stack_info']:
- log_entry[key] = value
-
- # Add exception info if present
- if record.exc_info:
- log_entry['exception'] = self.formatException(record.exc_info)
-
- return json.dumps(log_entry)
-
- return JsonFormatter()
-
- def _create_text_formatter(self) -> logging.Formatter:
- """Create text formatter for human-readable logs"""
- return logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S'
- )
-
- def _create_detailed_formatter(self) -> logging.Formatter:
- """Create detailed formatter with extra information"""
- return logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S'
- )
-
- def add_service_logger(self, service: str, config: Dict[str, Any]):
- """Add a logger for a specific service"""
- try:
- # Create service logger
- service_logger = logging.getLogger(f'picell.{service}')
- service_logger.setLevel(getattr(logging, config.get('level', 'INFO')))
-
- # Create log file path
- log_file = self.log_dir / f'{service}.log'
-
- # Create rotating file handler
- handler = logging.handlers.RotatingFileHandler(
- log_file,
- maxBytes=self.max_file_size,
- backupCount=self.backup_count,
- encoding='utf-8'
- )
-
- # Set formatter
- formatter_name = config.get('formatter', 'json')
- handler.setFormatter(self.formatters[formatter_name])
-
- # Add handler to logger
- service_logger.addHandler(handler)
-
- # Store logger and handler
- self.service_loggers[service] = service_logger
- self.handlers[service]['file'] = handler
-
- # Add console handler if requested
- if config.get('console', False):
- console_handler = logging.StreamHandler()
- console_handler.setFormatter(self.formatters[formatter_name])
- service_logger.addHandler(console_handler)
- self.handlers[service]['console'] = console_handler
-
- logger.info(f"Added logger for service: {service}")
-
- except Exception as e:
- logger.error(f"Error adding logger for {service}: {e}")
-
- def get_service_logs(self, service: str, level: str = 'INFO', lines: int = 50) -> List[str]:
- """Get logs for a specific service"""
- try:
- log_file = self.log_dir / f'{service}.log'
- if not log_file.exists():
- return [f"No log file found for service: {service}"]
-
- # Read log file
- with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
- all_lines = f.readlines()
-
- # Filter by level if specified
- if level != 'ALL':
- filtered_lines = []
- for line in all_lines:
- if self._is_log_level(line, level):
- filtered_lines.append(line)
- all_lines = filtered_lines
-
- # Return last N lines
- return all_lines[-lines:] if lines > 0 else all_lines
-
- except Exception as e:
- logger.error(f"Error reading logs for {service}: {e}")
- return [f"Error reading logs: {str(e)}"]
-
- def search_logs(self, query: str, time_range: Optional[Tuple[datetime, datetime]] = None,
- services: Optional[List[str]] = None, level: Optional[str] = None) -> List[Dict[str, Any]]:
- """Search logs across all services"""
- results = []
-
- # Determine which services to search
- if services is None:
- services = list(self.service_loggers.keys())
-
- for service in services:
- try:
- log_file = self.log_dir / f'{service}.log'
- if not log_file.exists():
- continue
-
- with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
- for line_num, line in enumerate(f, 1):
- # Parse JSON log entry
- try:
- log_entry = json.loads(line.strip())
-
- # Apply filters
- if not self._matches_search_criteria(log_entry, query, time_range, level):
- continue
-
- log_entry['service'] = service
- log_entry['line_number'] = line_num
- results.append(log_entry)
-
- except json.JSONDecodeError:
- # Handle non-JSON logs
- if query.lower() in line.lower():
- results.append({
- 'service': service,
- 'line_number': line_num,
- 'raw_line': line.strip(),
- 'timestamp': datetime.now().isoformat()
- })
-
- except Exception as e:
- logger.error(f"Error searching logs for {service}: {e}")
-
- # Sort by timestamp
- results.sort(key=lambda x: x.get('timestamp', ''), reverse=True)
- return results
-
- def _matches_search_criteria(self, log_entry: Dict[str, Any], query: str,
- time_range: Optional[Tuple[datetime, datetime]],
- level: Optional[str]) -> bool:
- """Check if log entry matches search criteria"""
- # Check query
- if query:
- message = log_entry.get('message', '').lower()
- if query.lower() not in message:
- return False
-
- # Check time range
- if time_range:
- try:
- log_time = datetime.fromisoformat(log_entry.get('timestamp', ''))
- if not (time_range[0] <= log_time <= time_range[1]):
- return False
- except (ValueError, TypeError):
- return False
-
- # Check level
- if level:
- if log_entry.get('level', '').upper() != level.upper():
- return False
-
- return True
-
- def _is_log_level(self, line: str, level: str) -> bool:
- """Check if log line matches specified level"""
- try:
- # Try to parse as JSON
- log_entry = json.loads(line.strip())
- return log_entry.get('level', '').upper() == level.upper()
- except json.JSONDecodeError:
- # Fallback to text parsing
- level_pattern = rf'\b{level.upper()}\b'
- return bool(re.search(level_pattern, line.upper()))
-
- def export_logs(self, format: str = 'json', filters: Optional[Dict[str, Any]] = None) -> str:
- """Export logs in specified format"""
- try:
- if filters is None:
- filters = {}
-
- # Get logs based on filters
- services = filters.get('services', list(self.service_loggers.keys()))
- level = filters.get('level')
- time_range = filters.get('time_range')
- query = filters.get('query', '')
-
- logs = self.search_logs(query, time_range, services, level)
-
- if format == 'json':
- return json.dumps(logs, indent=2)
- elif format == 'csv':
- return self._logs_to_csv(logs)
- elif format == 'text':
- return self._logs_to_text(logs)
- else:
- raise ValueError(f"Unsupported export format: {format}")
-
- except Exception as e:
- logger.error(f"Error exporting logs: {e}")
- raise
-
- def _logs_to_csv(self, logs: List[Dict[str, Any]]) -> str:
- """Convert logs to CSV format"""
- if not logs:
- return ""
-
- # Get all possible fields
- fields = set()
- for log in logs:
- fields.update(log.keys())
-
- fields = sorted(list(fields))
-
- # Create CSV
- csv_lines = [','.join(fields)]
- for log in logs:
- row = [str(log.get(field, '')) for field in fields]
- csv_lines.append(','.join(row))
-
- return '\n'.join(csv_lines)
-
- def _logs_to_text(self, logs: List[Dict[str, Any]]) -> str:
- """Convert logs to text format"""
- text_lines = []
- for log in logs:
- timestamp = log.get('timestamp', '')
- level = log.get('level', '')
- service = log.get('service', '')
- message = log.get('message', '')
- text_lines.append(f"{timestamp} [{level}] {service}: {message}")
-
- return '\n'.join(text_lines)
-
- def get_log_statistics(self, service: Optional[str] = None) -> Dict[str, Any]:
- """Get log statistics"""
- stats = {}
-
- if service:
- services = [service]
- else:
- services = list(self.service_loggers.keys())
-
- for svc in services:
- try:
- log_file = self.log_dir / f'{svc}.log'
- if not log_file.exists():
- stats[svc] = {'error': 'Log file not found'}
- continue
-
- # Count log entries by level
- level_counts = defaultdict(int)
- total_entries = 0
- last_entry = None
-
- with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
- for line in f:
- try:
- log_entry = json.loads(line.strip())
- level = log_entry.get('level', 'UNKNOWN')
- level_counts[level] += 1
- total_entries += 1
- last_entry = log_entry.get('timestamp')
- except json.JSONDecodeError:
- total_entries += 1
-
- stats[svc] = {
- 'total_entries': total_entries,
- 'level_counts': dict(level_counts),
- 'last_entry': last_entry,
- 'file_size': log_file.stat().st_size
- }
-
- except Exception as e:
- stats[svc] = {'error': str(e)}
-
- return stats
-
- def rotate_logs(self, service: Optional[str] = None):
- """Manually rotate logs"""
- try:
- if service:
- services = [service]
- else:
- services = list(self.service_loggers.keys())
-
- for svc in services:
- if svc in self.handlers and 'file' in self.handlers[svc]:
- handler = self.handlers[svc]['file']
- handler.doRollover()
- logger.info(f"Rotated logs for service: {svc}")
-
- except Exception as e:
- logger.error(f"Error rotating logs: {e}")
-
- def cleanup_old_logs(self, days: int = 30):
- """Clean up log files older than specified days"""
- try:
- cutoff_date = datetime.now() - timedelta(days=days)
- deleted_count = 0
-
- for log_file in self.log_dir.glob('*.log.*'):
- try:
- file_time = datetime.fromtimestamp(log_file.stat().st_mtime)
- if file_time < cutoff_date:
- log_file.unlink()
- deleted_count += 1
- except Exception as e:
- logger.warning(f"Error checking file {log_file}: {e}")
-
- logger.info(f"Cleaned up {deleted_count} old log files")
-
- except Exception as e:
- logger.error(f"Error cleaning up old logs: {e}")
-
- def _start_rotation_monitor(self):
- """Start automatic log rotation monitoring"""
- self.running = True
- self.rotation_thread = threading.Thread(target=self._rotation_monitor_loop, daemon=True)
- self.rotation_thread.start()
-
- def _rotation_monitor_loop(self):
- """Monitor and rotate logs automatically"""
- while self.running:
- try:
- # Check each service's log file size
- for service in self.service_loggers.keys():
- log_file = self.log_dir / f'{service}.log'
- if log_file.exists() and log_file.stat().st_size > self.max_file_size:
- self.rotate_logs(service)
-
- # Sleep for 1 hour before next check
- time.sleep(3600)
-
- except Exception as e:
- logger.error(f"Error in rotation monitor: {e}")
- time.sleep(60) # Sleep for 1 minute on error
-
- def stop(self):
- """Stop the log manager"""
- self.running = False
- if self.rotation_thread:
- self.rotation_thread.join(timeout=5)
-
- # Close all handlers
- for service_handlers in self.handlers.values():
- for handler in service_handlers.values():
- handler.close()
-
- logger.info("Log manager stopped")
-
- def get_log_file_info(self, service: str) -> Dict[str, Any]:
- """Get information about a service's log file"""
- try:
- log_file = self.log_dir / f'{service}.log'
- if not log_file.exists():
- return {'error': 'Log file not found'}
-
- stat = log_file.stat()
- return {
- 'file_path': str(log_file),
- 'file_size': stat.st_size,
- 'created': datetime.fromtimestamp(stat.st_ctime).isoformat(),
- 'modified': datetime.fromtimestamp(stat.st_mtime).isoformat(),
- 'exists': True
- }
-
- except Exception as e:
- return {'error': str(e)}
-
- def compress_old_logs(self):
- """Compress old log files to save space"""
- try:
- compressed_count = 0
-
- for log_file in self.log_dir.glob('*.log.*'):
- if not log_file.name.endswith('.gz'):
- try:
- with open(log_file, 'rb') as f_in:
- gz_file = log_file.with_suffix(log_file.suffix + '.gz')
- with gzip.open(gz_file, 'wb') as f_out:
- shutil.copyfileobj(f_in, f_out)
-
- # Remove original file
- log_file.unlink()
- compressed_count += 1
-
- except Exception as e:
- logger.warning(f"Error compressing {log_file}: {e}")
-
- logger.info(f"Compressed {compressed_count} log files")
-
- except Exception as e:
+#!/usr/bin/env python3
+"""
+Log Manager for Personal Internet Cell
+Comprehensive logging management for all services
+"""
+
+import os
+import json
+import logging
+import logging.handlers
+from datetime import datetime, timedelta
+from typing import Dict, List, Optional, Any, Tuple
+from pathlib import Path
+import re
+import gzip
+import shutil
+from collections import defaultdict
+import threading
+import time
+from enum import Enum
+
+logger = logging.getLogger(__name__)
+
+class LogLevel(Enum):
+ """Log levels"""
+ DEBUG = "DEBUG"
+ INFO = "INFO"
+ WARNING = "WARNING"
+ ERROR = "ERROR"
+ CRITICAL = "CRITICAL"
+
+class LogManager:
+ """Comprehensive logging management for all services"""
+
+ def __init__(self, log_dir: str = '/app/logs', max_file_size: int = 10 * 1024 * 1024,
+ backup_count: int = 5):
+ self.log_dir = Path(log_dir)
+ self.max_file_size = max_file_size
+ self.backup_count = backup_count
+
+ # Ensure log directory exists
+ self.log_dir.mkdir(parents=True, exist_ok=True)
+
+ # Service loggers
+ self.service_loggers: Dict[str, logging.Logger] = {}
+
+ # Log formatters
+ self.formatters = {
+ 'json': self._create_json_formatter(),
+ 'text': self._create_text_formatter(),
+ 'detailed': self._create_detailed_formatter()
+ }
+
+ # Log handlers
+ self.handlers: Dict[str, Dict[str, logging.Handler]] = defaultdict(dict)
+
+ # Log statistics
+ self.log_stats = defaultdict(lambda: {
+ 'total_entries': 0,
+ 'error_count': 0,
+ 'warning_count': 0,
+ 'last_entry': None
+ })
+
+ # Log rotation thread
+ self.rotation_thread = None
+ self.running = False
+
+ # Start log rotation monitoring
+ self._start_rotation_monitor()
+
+ def _create_json_formatter(self) -> logging.Formatter:
+ """Create JSON formatter for structured logging"""
+ class JsonFormatter(logging.Formatter):
+ def format(self, record):
+ log_entry = {
+ 'timestamp': self.formatTime(record),
+ 'level': record.levelname,
+ 'logger': record.name,
+ 'message': record.getMessage(),
+ 'module': record.module,
+ 'function': record.funcName,
+ 'line': record.lineno
+ }
+
+ # Add extra fields if present
+ for key, value in record.__dict__.items():
+ if key not in ['name', 'msg', 'args', 'levelname', 'levelno', 'pathname',
+ 'filename', 'module', 'lineno', 'funcName', 'created',
+ 'msecs', 'relativeCreated', 'thread', 'threadName',
+ 'processName', 'process', 'getMessage', 'exc_info',
+ 'exc_text', 'stack_info']:
+ log_entry[key] = value
+
+ # Add exception info if present
+ if record.exc_info:
+ log_entry['exception'] = self.formatException(record.exc_info)
+
+ return json.dumps(log_entry)
+
+ return JsonFormatter()
+
+ def _create_text_formatter(self) -> logging.Formatter:
+ """Create text formatter for human-readable logs"""
+ return logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S'
+ )
+
+ def _create_detailed_formatter(self) -> logging.Formatter:
+ """Create detailed formatter with extra information"""
+ return logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(funcName)s:%(lineno)d - %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S'
+ )
+
+ def add_service_logger(self, service: str, config: Dict[str, Any]):
+ """Add a logger for a specific service"""
+ try:
+ # Create service logger
+ service_logger = logging.getLogger(f'picell.{service}')
+ service_logger.setLevel(getattr(logging, config.get('level', 'INFO')))
+
+ # Create log file path
+ log_file = self.log_dir / f'{service}.log'
+
+ # Create rotating file handler
+ handler = logging.handlers.RotatingFileHandler(
+ log_file,
+ maxBytes=self.max_file_size,
+ backupCount=self.backup_count,
+ encoding='utf-8'
+ )
+
+ # Set formatter
+ formatter_name = config.get('formatter', 'json')
+ handler.setFormatter(self.formatters[formatter_name])
+
+ # Add handler to logger
+ service_logger.addHandler(handler)
+
+ # Store logger and handler
+ self.service_loggers[service] = service_logger
+ self.handlers[service]['file'] = handler
+
+ # Add console handler if requested
+ if config.get('console', False):
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(self.formatters[formatter_name])
+ service_logger.addHandler(console_handler)
+ self.handlers[service]['console'] = console_handler
+
+ logger.info(f"Added logger for service: {service}")
+
+ except Exception as e:
+ logger.error(f"Error adding logger for {service}: {e}")
+
+ def get_service_logs(self, service: str, level: str = 'INFO', lines: int = 50) -> List[str]:
+ """Get logs for a specific service"""
+ try:
+ log_file = self.log_dir / f'{service}.log'
+ if not log_file.exists():
+ return [f"No log file found for service: {service}"]
+
+ # Read log file
+ with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
+ all_lines = f.readlines()
+
+ # Filter by level if specified
+ if level != 'ALL':
+ filtered_lines = []
+ for line in all_lines:
+ if self._is_log_level(line, level):
+ filtered_lines.append(line)
+ all_lines = filtered_lines
+
+ # Return last N lines
+ return all_lines[-lines:] if lines > 0 else all_lines
+
+ except Exception as e:
+ logger.error(f"Error reading logs for {service}: {e}")
+ return [f"Error reading logs: {str(e)}"]
+
+ def get_service_logs_parsed(self, service: str, level: str = 'INFO', lines: int = 50) -> List[Dict[str, Any]]:
+ """Get parsed logs for a specific service"""
+ try:
+ log_file = self.log_dir / f'{service}.log'
+ if not log_file.exists():
+ return [{"error": f"No log file found for service: {service}"}]
+
+ results = []
+ with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
+ all_lines = f.readlines()
+
+ # Process lines in reverse order to get most recent first
+ for line in reversed(all_lines[-lines:] if lines > 0 else all_lines):
+ line = line.strip()
+ if not line:
+ continue
+
+ # Try to parse as JSON
+ try:
+ log_entry = json.loads(line)
+ # Apply level filter
+ if level != 'ALL' and log_entry.get('level', '').upper() != level.upper():
+ continue
+ results.append(log_entry)
+ except json.JSONDecodeError:
+ # Handle non-JSON logs
+ if level == 'ALL' or self._is_log_level(line, level):
+ results.append({
+ 'raw_line': line,
+ 'timestamp': datetime.now().isoformat(),
+ 'level': 'INFO'
+ })
+
+ return results
+
+ except Exception as e:
+ logger.error(f"Error reading parsed logs for {service}: {e}")
+ return [{"error": f"Error reading logs: {str(e)}"}]
+
+ def search_logs(self, query: str, time_range: Optional[Tuple[datetime, datetime]] = None,
+ services: Optional[List[str]] = None, level: Optional[str] = None) -> List[Dict[str, Any]]:
+ """Search logs across all services"""
+ results = []
+
+ # Determine which services to search
+ if services is None:
+ services = list(self.service_loggers.keys())
+
+ for service in services:
+ try:
+ log_file = self.log_dir / f'{service}.log'
+ if not log_file.exists():
+ continue
+
+ with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
+ for line_num, line in enumerate(f, 1):
+ # Parse JSON log entry
+ try:
+ log_entry = json.loads(line.strip())
+
+ # Apply filters
+ if not self._matches_search_criteria(log_entry, query, time_range, level):
+ continue
+
+ log_entry['service'] = service
+ log_entry['line_number'] = line_num
+ results.append(log_entry)
+
+ except json.JSONDecodeError:
+ # Handle non-JSON logs
+ if query.lower() in line.lower():
+ results.append({
+ 'service': service,
+ 'line_number': line_num,
+ 'raw_line': line.strip(),
+ 'timestamp': datetime.now().isoformat()
+ })
+
+ except Exception as e:
+ logger.error(f"Error searching logs for {service}: {e}")
+
+ # Sort by timestamp
+ results.sort(key=lambda x: x.get('timestamp', ''), reverse=True)
+ return results
+
+ def _matches_search_criteria(self, log_entry: Dict[str, Any], query: str,
+ time_range: Optional[Tuple[datetime, datetime]],
+ level: Optional[str]) -> bool:
+ """Check if log entry matches search criteria"""
+ # Check query
+ if query:
+ message = log_entry.get('message', '').lower()
+ if query.lower() not in message:
+ return False
+
+ # Check time range
+ if time_range:
+ try:
+ log_time = datetime.fromisoformat(log_entry.get('timestamp', ''))
+ if not (time_range[0] <= log_time <= time_range[1]):
+ return False
+ except (ValueError, TypeError):
+ return False
+
+ # Check level
+ if level:
+ if log_entry.get('level', '').upper() != level.upper():
+ return False
+
+ return True
+
+ def _is_log_level(self, line: str, level: str) -> bool:
+ """Check if log line matches specified level"""
+ try:
+ # Try to parse as JSON
+ log_entry = json.loads(line.strip())
+ return log_entry.get('level', '').upper() == level.upper()
+ except json.JSONDecodeError:
+ # Fallback to text parsing
+ level_pattern = rf'\b{level.upper()}\b'
+ return bool(re.search(level_pattern, line.upper()))
+
+ def export_logs(self, format: str = 'json', filters: Optional[Dict[str, Any]] = None) -> str:
+ """Export logs in specified format"""
+ try:
+ if filters is None:
+ filters = {}
+
+ # Get logs based on filters
+ services = filters.get('services', list(self.service_loggers.keys()))
+ level = filters.get('level')
+ time_range = filters.get('time_range')
+ query = filters.get('query', '')
+
+ logs = self.search_logs(query, time_range, services, level)
+
+ if format == 'json':
+ return json.dumps(logs, indent=2)
+ elif format == 'csv':
+ return self._logs_to_csv(logs)
+ elif format == 'text':
+ return self._logs_to_text(logs)
+ else:
+ raise ValueError(f"Unsupported export format: {format}")
+
+ except Exception as e:
+ logger.error(f"Error exporting logs: {e}")
+ raise
+
+ def _logs_to_csv(self, logs: List[Dict[str, Any]]) -> str:
+ """Convert logs to CSV format"""
+ if not logs:
+ return ""
+
+ # Get all possible fields
+ fields = set()
+ for log in logs:
+ fields.update(log.keys())
+
+ fields = sorted(list(fields))
+
+ # Create CSV
+ csv_lines = [','.join(fields)]
+ for log in logs:
+ row = [str(log.get(field, '')) for field in fields]
+ csv_lines.append(','.join(row))
+
+ return '\n'.join(csv_lines)
+
+ def _logs_to_text(self, logs: List[Dict[str, Any]]) -> str:
+ """Convert logs to text format"""
+ text_lines = []
+ for log in logs:
+ timestamp = log.get('timestamp', '')
+ level = log.get('level', '')
+ service = log.get('service', '')
+ message = log.get('message', '')
+ text_lines.append(f"{timestamp} [{level}] {service}: {message}")
+
+ return '\n'.join(text_lines)
+
+ def get_log_statistics(self, service: Optional[str] = None) -> Dict[str, Any]:
+ """Get log statistics"""
+ stats = {}
+
+ if service:
+ services = [service]
+ else:
+ services = list(self.service_loggers.keys())
+
+ for svc in services:
+ try:
+ log_file = self.log_dir / f'{svc}.log'
+ if not log_file.exists():
+ stats[svc] = {'error': 'Log file not found'}
+ continue
+
+ # Count log entries by level
+ level_counts = defaultdict(int)
+ total_entries = 0
+ last_entry = None
+
+ with open(log_file, 'r', encoding='utf-8', errors='ignore') as f:
+ for line in f:
+ try:
+ log_entry = json.loads(line.strip())
+ level = log_entry.get('level', 'UNKNOWN')
+ level_counts[level] += 1
+ total_entries += 1
+ last_entry = log_entry.get('timestamp')
+ except json.JSONDecodeError:
+ total_entries += 1
+
+ stats[svc] = {
+ 'total_entries': total_entries,
+ 'level_counts': dict(level_counts),
+ 'last_entry': last_entry,
+ 'file_size': log_file.stat().st_size
+ }
+
+ except Exception as e:
+ stats[svc] = {'error': str(e)}
+
+ return stats
+
+ def rotate_logs(self, service: Optional[str] = None):
+ """Manually rotate logs"""
+ try:
+ if service:
+ services = [service]
+ else:
+ services = list(self.service_loggers.keys())
+
+ for svc in services:
+ if svc in self.handlers and 'file' in self.handlers[svc]:
+ handler = self.handlers[svc]['file']
+ handler.doRollover()
+ logger.info(f"Rotated logs for service: {svc}")
+
+ except Exception as e:
+ logger.error(f"Error rotating logs: {e}")
+
+ def cleanup_old_logs(self, days: int = 30):
+ """Clean up log files older than specified days"""
+ try:
+ cutoff_date = datetime.now() - timedelta(days=days)
+ deleted_count = 0
+
+ for log_file in self.log_dir.glob('*.log.*'):
+ try:
+ file_time = datetime.fromtimestamp(log_file.stat().st_mtime)
+ if file_time < cutoff_date:
+ log_file.unlink()
+ deleted_count += 1
+ except Exception as e:
+ logger.warning(f"Error checking file {log_file}: {e}")
+
+ logger.info(f"Cleaned up {deleted_count} old log files")
+
+ except Exception as e:
+ logger.error(f"Error cleaning up old logs: {e}")
+
+ def _start_rotation_monitor(self):
+ """Start automatic log rotation monitoring"""
+ self.running = True
+ self.rotation_thread = threading.Thread(target=self._rotation_monitor_loop, daemon=True)
+ self.rotation_thread.start()
+
+ def _rotation_monitor_loop(self):
+ """Monitor and rotate logs automatically"""
+ while self.running:
+ try:
+ # Check each service's log file size
+ for service in self.service_loggers.keys():
+ log_file = self.log_dir / f'{service}.log'
+ if log_file.exists() and log_file.stat().st_size > self.max_file_size:
+ self.rotate_logs(service)
+
+ # Sleep for 1 hour before next check
+ time.sleep(3600)
+
+ except Exception as e:
+ logger.error(f"Error in rotation monitor: {e}")
+ time.sleep(60) # Sleep for 1 minute on error
+
+ def stop(self):
+ """Stop the log manager"""
+ self.running = False
+ if self.rotation_thread:
+ self.rotation_thread.join(timeout=5)
+
+ # Close all handlers
+ for service_handlers in self.handlers.values():
+ for handler in service_handlers.values():
+ handler.close()
+
+ logger.info("Log manager stopped")
+
+ def get_log_file_info(self, service: str) -> Dict[str, Any]:
+ """Get information about a service's log file"""
+ try:
+ log_file = self.log_dir / f'{service}.log'
+ if not log_file.exists():
+ return {'error': 'Log file not found'}
+
+ stat = log_file.stat()
+ return {
+ 'file_path': str(log_file),
+ 'file_size': stat.st_size,
+ 'created': datetime.fromtimestamp(stat.st_ctime).isoformat(),
+ 'modified': datetime.fromtimestamp(stat.st_mtime).isoformat(),
+ 'exists': True
+ }
+
+ except Exception as e:
+ return {'error': str(e)}
+
+ def compress_old_logs(self):
+ """Compress old log files to save space"""
+ try:
+ compressed_count = 0
+
+ for log_file in self.log_dir.glob('*.log.*'):
+ if not log_file.name.endswith('.gz'):
+ try:
+ with open(log_file, 'rb') as f_in:
+ gz_file = log_file.with_suffix(log_file.suffix + '.gz')
+ with gzip.open(gz_file, 'wb') as f_out:
+ shutil.copyfileobj(f_in, f_out)
+
+ # Remove original file
+ log_file.unlink()
+ compressed_count += 1
+
+ except Exception as e:
+ logger.warning(f"Error compressing {log_file}: {e}")
+
+ logger.info(f"Compressed {compressed_count} log files")
+
+ except Exception as e:
logger.error(f"Error compressing logs: {e}")
\ No newline at end of file
diff --git a/api/network_manager.py b/api/network_manager.py
index a0092de..9ebcaed 100644
--- a/api/network_manager.py
+++ b/api/network_manager.py
@@ -408,47 +408,111 @@ class NetworkManager(BaseServiceManager):
is_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER') == 'true'
if is_docker:
- # Return positive status when running in Docker
+ # Check if network containers are actually running
+ dns_running = self._check_dns_container_status()
+ dhcp_running = self._check_dhcp_container_status()
+ ntp_running = self._check_ntp_container_status()
+ all_running = dns_running and dhcp_running and ntp_running
+
status = {
- 'dns_running': True,
- 'dhcp_running': True,
- 'ntp_running': True,
- 'running': True,
- 'status': 'online',
+ 'dns_running': dns_running,
+ 'dhcp_running': dhcp_running,
+ 'ntp_running': ntp_running,
+ 'running': all_running,
+ 'status': 'online' if all_running else 'offline',
+ 'network': {
+ 'dns_running': dns_running,
+ 'dhcp_running': dhcp_running,
+ 'ntp_running': ntp_running,
+ 'running': all_running,
+ 'status': 'online' if all_running else 'offline'
+ },
'timestamp': datetime.utcnow().isoformat()
}
else:
# Check actual service status in production
+ dns_running = self._check_dns_status()
+ dhcp_running = self._check_dhcp_status()
+ ntp_running = self._check_ntp_status()
+
status = {
- 'dns_running': self._check_dns_status(),
- 'dhcp_running': self._check_dhcp_status(),
- 'ntp_running': self._check_ntp_status(),
+ 'dns_running': dns_running,
+ 'dhcp_running': dhcp_running,
+ 'ntp_running': ntp_running,
+ 'running': dns_running and dhcp_running and ntp_running,
+ 'status': 'online' if (dns_running and dhcp_running and ntp_running) else 'offline',
+ 'network': {
+ 'dns_running': dns_running,
+ 'dhcp_running': dhcp_running,
+ 'ntp_running': ntp_running,
+ 'running': dns_running and dhcp_running and ntp_running,
+ 'status': 'online' if (dns_running and dhcp_running and ntp_running) else 'offline'
+ },
'timestamp': datetime.utcnow().isoformat()
}
-
- # Determine overall status
- status['running'] = status['dns_running'] and status['dhcp_running'] and status['ntp_running']
- status['status'] = 'online' if status['running'] else 'offline'
return status
except Exception as e:
return self.handle_error(e, "get_status")
+ def _check_dns_container_status(self) -> bool:
+ """Check if DNS Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-dns'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
+ def _check_dhcp_container_status(self) -> bool:
+ """Check if DHCP Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-dhcp'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
+ def _check_ntp_container_status(self) -> bool:
+ """Check if NTP Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-ntp'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
def test_connectivity(self) -> Dict[str, Any]:
"""Test network service connectivity"""
try:
+ dns_test = self.test_dns_resolution('google.com')
+ dhcp_test = self.test_dhcp_functionality()
+ ntp_test = self.test_ntp_functionality()
+
results = {
- 'dns_test': self.test_dns_resolution('google.com'),
- 'dhcp_test': self.test_dhcp_functionality(),
- 'ntp_test': self.test_ntp_functionality(),
+ 'dns_test': dns_test,
+ 'dhcp_test': dhcp_test,
+ 'ntp_test': ntp_test,
'timestamp': datetime.utcnow().isoformat()
}
# Determine overall success
- results['success'] = all(
+ success = all(
result.get('success', False)
- for result in [results['dns_test'], results['dhcp_test'], results['ntp_test']]
+ for result in [dns_test, dhcp_test, ntp_test]
)
+ results['success'] = success
+
+ # Add network key for compatibility
+ results['network'] = {
+ 'dns_test': dns_test,
+ 'dhcp_test': dhcp_test,
+ 'ntp_test': ntp_test,
+ 'success': success
+ }
return results
except Exception as e:
diff --git a/api/routing_manager.py b/api/routing_manager.py
index e92e820..fb3aabe 100644
--- a/api/routing_manager.py
+++ b/api/routing_manager.py
@@ -9,6 +9,7 @@ import json
import subprocess
import logging
import ipaddress
+import time
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Any
import re
@@ -24,12 +25,19 @@ class RoutingManager(BaseServiceManager):
self.routing_dir = os.path.join(config_dir, 'routing')
self.rules_file = os.path.join(data_dir, 'routing', 'rules.json')
+ # Service state tracking
+ self._service_running = False
+ self._state_file = os.path.join(data_dir, 'routing', 'service_state.json')
+
# Ensure directories exist
os.makedirs(self.routing_dir, exist_ok=True)
os.makedirs(os.path.dirname(self.rules_file), exist_ok=True)
# Initialize routing configuration
self._ensure_config_exists()
+
+ # Load service state
+ self._load_service_state()
def _ensure_config_exists(self):
"""Ensure routing configuration exists"""
@@ -53,6 +61,33 @@ class RoutingManager(BaseServiceManager):
logger.info("Routing rules initialized")
+ def _load_service_state(self):
+ """Load service state from file"""
+ try:
+ if os.path.exists(self._state_file):
+ with open(self._state_file, 'r') as f:
+ state = json.load(f)
+ self._service_running = state.get('running', False)
+ else:
+ # Default to running if no state file exists (for backward compatibility)
+ self._service_running = True
+ self._save_service_state()
+ except Exception as e:
+ logger.error(f"Failed to load service state: {e}")
+ self._service_running = True
+
+ def _save_service_state(self):
+ """Save service state to file"""
+ try:
+ state = {
+ 'running': self._service_running,
+ 'timestamp': datetime.utcnow().isoformat()
+ }
+ with open(self._state_file, 'w') as f:
+ json.dump(state, f, indent=2)
+ except Exception as e:
+ logger.error(f"Failed to save service state: {e}")
+
def _validate_cidr(self, cidr):
import ipaddress
try:
@@ -485,9 +520,12 @@ class RoutingManager(BaseServiceManager):
routing_status = self.get_routing_status()
rules = self._load_rules()
+ # Check if routing service is actually running by testing basic functionality
+ is_running = self._is_routing_service_running()
+
status = {
- 'running': routing_status.get('running', False),
- 'status': 'online' if routing_status.get('running', False) else 'offline',
+ 'running': is_running,
+ 'status': 'online' if is_running else 'offline',
'routing_status': routing_status,
'nat_rules_count': len(rules.get('nat_rules', [])),
'peer_routes_count': len(rules.get('peer_routes', {})),
@@ -569,6 +607,13 @@ class RoutingManager(BaseServiceManager):
'message': f'iptables access failed: {result.stderr}',
'error': result.stderr
}
+ except FileNotFoundError:
+ # System tools not available (development environment)
+ return {
+ 'success': True,
+ 'message': 'iptables not available (development mode)',
+ 'rules_count': 0
+ }
except Exception as e:
return {
'success': False,
@@ -596,6 +641,13 @@ class RoutingManager(BaseServiceManager):
'message': f'Network interfaces access failed: {result.stderr}',
'error': result.stderr
}
+ except FileNotFoundError:
+ # System tools not available (development environment)
+ return {
+ 'success': True,
+ 'message': 'Network tools not available (development mode)',
+ 'interfaces_count': 0
+ }
except Exception as e:
return {
'success': False,
@@ -623,6 +675,13 @@ class RoutingManager(BaseServiceManager):
'message': f'Routing table access failed: {result.stderr}',
'error': result.stderr
}
+ except FileNotFoundError:
+ # System tools not available (development environment)
+ return {
+ 'success': True,
+ 'message': 'Routing tools not available (development mode)',
+ 'routes_count': 0
+ }
except Exception as e:
return {
'success': False,
@@ -815,6 +874,19 @@ class RoutingManager(BaseServiceManager):
return routes
+ except FileNotFoundError:
+ # System tools not available (development environment)
+ # Return mock routing table for development
+ return [
+ {
+ 'route': 'default via 192.168.1.1 dev en0',
+ 'parsed': {'destination': 'default', 'via': '192.168.1.1', 'dev': 'en0', 'metric': ''}
+ },
+ {
+ 'route': '10.0.0.0/24 dev wg0',
+ 'parsed': {'destination': '10.0.0.0/24', 'via': '', 'dev': 'wg0', 'metric': ''}
+ }
+ ]
except Exception as e:
logger.error(f"Failed to get routing table: {e}")
return []
@@ -843,4 +915,102 @@ class RoutingManager(BaseServiceManager):
except Exception as e:
logger.error(f"Failed to parse route: {e}")
- return {'destination': route_line, 'via': '', 'dev': '', 'metric': ''}
\ No newline at end of file
+ return {'destination': route_line, 'via': '', 'dev': '', 'metric': ''}
+
+ def _is_routing_service_running(self) -> bool:
+ """Check if routing service is actually running"""
+ # Use internal state tracking instead of system tool checks
+ return self._service_running
+
+ def start(self) -> bool:
+ """Start routing service"""
+ try:
+ # Set internal state to running
+ self._service_running = True
+ self._save_service_state()
+
+ # Try to enable IP forwarding (may fail in Docker without privileges)
+ try:
+ subprocess.run(['sysctl', '-w', 'net.ipv4.ip_forward=1'],
+ check=True, timeout=10)
+ except (subprocess.CalledProcessError, FileNotFoundError) as e:
+ logger.warning(f"Could not enable IP forwarding: {e}")
+ # Continue anyway - service is considered started
+
+ # Load existing rules
+ rules = self._load_rules()
+
+ # Apply all enabled rules (may fail in Docker without privileges)
+ try:
+ for rule in rules.get('nat_rules', []):
+ if rule.get('enabled', True):
+ self._apply_nat_rule(rule)
+
+ for rule in rules.get('firewall_rules', []):
+ if rule.get('enabled', True):
+ self._apply_firewall_rule(rule)
+
+ for route in rules.get('peer_routes', {}).values():
+ if route.get('enabled', True):
+ self._apply_peer_route(route)
+
+ for exit_node in rules.get('exit_nodes', []):
+ if exit_node.get('enabled', True):
+ self._apply_exit_node(exit_node)
+ except Exception as e:
+ logger.warning(f"Could not apply routing rules: {e}")
+ # Continue anyway - service is considered started
+
+ logger.info("Routing service started successfully")
+ return True
+
+ except Exception as e:
+ logger.error(f"Failed to start routing service: {e}")
+ self._service_running = False
+ self._save_service_state()
+ return False
+
+ def stop(self) -> bool:
+ """Stop routing service"""
+ try:
+ # Set internal state to stopped
+ self._service_running = False
+ self._save_service_state()
+
+ # Try to clear all iptables rules (may fail in Docker without privileges)
+ try:
+ subprocess.run(['iptables', '-t', 'nat', '-F'],
+ check=True, timeout=10)
+ subprocess.run(['iptables', '-F'],
+ check=True, timeout=10)
+ except (subprocess.CalledProcessError, FileNotFoundError) as e:
+ logger.warning(f"Could not clear iptables rules: {e}")
+ # Continue anyway - service is considered stopped
+
+ # Try to disable IP forwarding (may fail in Docker without privileges)
+ try:
+ subprocess.run(['sysctl', '-w', 'net.ipv4.ip_forward=0'],
+ check=True, timeout=10)
+ except (subprocess.CalledProcessError, FileNotFoundError) as e:
+ logger.warning(f"Could not disable IP forwarding: {e}")
+ # Continue anyway - service is considered stopped
+
+ logger.info("Routing service stopped successfully")
+ return True
+
+ except Exception as e:
+ logger.error(f"Failed to stop routing service: {e}")
+ # Even if system commands fail, we consider the service stopped
+ self._service_running = False
+ self._save_service_state()
+ return True # Return True because the state is now stopped
+
+ def restart(self) -> bool:
+ """Restart routing service"""
+ try:
+ self.stop()
+ time.sleep(1) # Brief pause
+ return self.start()
+ except Exception as e:
+ logger.error(f"Failed to restart routing service: {e}")
+ return False
\ No newline at end of file
diff --git a/api/service_bus.py b/api/service_bus.py
index b1f60db..c53a394 100644
--- a/api/service_bus.py
+++ b/api/service_bus.py
@@ -179,27 +179,40 @@ class ServiceBus:
def orchestrate_service_start(self, service_name: str) -> bool:
"""Orchestrate starting a service with its dependencies"""
try:
- # Check dependencies
- dependencies = self.service_dependencies.get(service_name, [])
- for dep in dependencies:
- if dep not in self.service_registry:
- logger.warning(f"Service {service_name} depends on {dep} which is not registered")
+ # Map service names to Docker container names
+ service_to_container = {
+ 'wireguard': 'cell-wireguard',
+ 'email': 'cell-mail',
+ 'calendar': 'cell-radicale',
+ 'files': 'cell-webdav',
+ 'network': 'cell-dns', # DNS is the main network service
+ 'routing': None, # Routing is a system service, not a container
+ 'vault': None, # Vault is part of API, not a separate container
+ 'container': None # Container manager doesn't have its own container
+ }
+
+ container_name = service_to_container.get(service_name)
+
+ if container_name is None:
+ # For services without containers (routing, vault, container), just call their start method
+ if hasattr(self.service_registry[service_name], 'start'):
+ self.service_registry[service_name].start()
+ logger.info(f"Started service (no container): {service_name}")
+ return True
+
+ # For services with containers, start the Docker container
+ if 'container' in self.service_registry:
+ container_manager = self.service_registry['container']
+ success = container_manager.start_container(container_name)
+ if success:
+ logger.info(f"Started container {container_name} for service {service_name}")
+ return True
+ else:
+ logger.error(f"Failed to start container {container_name} for service {service_name}")
return False
-
- # Run pre-start hooks
- if service_name in self.lifecycle_hooks and 'pre_start' in self.lifecycle_hooks[service_name]:
- self.lifecycle_hooks[service_name]['pre_start']()
-
- # Start the service
- if hasattr(self.service_registry[service_name], 'start'):
- self.service_registry[service_name].start()
-
- # Run post-start hooks
- if service_name in self.lifecycle_hooks and 'post_start' in self.lifecycle_hooks[service_name]:
- self.lifecycle_hooks[service_name]['post_start']()
-
- logger.info(f"Orchestrated start of service: {service_name}")
- return True
+ else:
+ logger.error("Container manager not available")
+ return False
except Exception as e:
logger.error(f"Error orchestrating start of {service_name}: {e}")
@@ -208,20 +221,40 @@ class ServiceBus:
def orchestrate_service_stop(self, service_name: str) -> bool:
"""Orchestrate stopping a service"""
try:
- # Run pre-stop hooks
- if service_name in self.lifecycle_hooks and 'pre_stop' in self.lifecycle_hooks[service_name]:
- self.lifecycle_hooks[service_name]['pre_stop']()
+ # Map service names to Docker container names
+ service_to_container = {
+ 'wireguard': 'cell-wireguard',
+ 'email': 'cell-mail',
+ 'calendar': 'cell-radicale',
+ 'files': 'cell-webdav',
+ 'network': 'cell-dns', # DNS is the main network service
+ 'routing': None, # Routing is a system service, not a container
+ 'vault': None, # Vault is part of API, not a separate container
+ 'container': None # Container manager doesn't have its own container
+ }
- # Stop the service
- if hasattr(self.service_registry[service_name], 'stop'):
- self.service_registry[service_name].stop()
+ container_name = service_to_container.get(service_name)
- # Run post-stop hooks
- if service_name in self.lifecycle_hooks and 'post_stop' in self.lifecycle_hooks[service_name]:
- self.lifecycle_hooks[service_name]['post_stop']()
+ if container_name is None:
+ # For services without containers (routing, vault, container), just call their stop method
+ if hasattr(self.service_registry[service_name], 'stop'):
+ self.service_registry[service_name].stop()
+ logger.info(f"Stopped service (no container): {service_name}")
+ return True
- logger.info(f"Orchestrated stop of service: {service_name}")
- return True
+ # For services with containers, stop the Docker container
+ if 'container' in self.service_registry:
+ container_manager = self.service_registry['container']
+ success = container_manager.stop_container(container_name)
+ if success:
+ logger.info(f"Stopped container {container_name} for service {service_name}")
+ return True
+ else:
+ logger.error(f"Failed to stop container {container_name} for service {service_name}")
+ return False
+ else:
+ logger.error("Container manager not available")
+ return False
except Exception as e:
logger.error(f"Error orchestrating stop of {service_name}: {e}")
diff --git a/api/test_enhanced_api.py b/api/test_enhanced_api.py
index a6b0ee2..5cf798e 100644
--- a/api/test_enhanced_api.py
+++ b/api/test_enhanced_api.py
@@ -1,674 +1,694 @@
-#!/usr/bin/env python3
-"""
-Comprehensive Test Suite for Enhanced Personal Internet Cell API
-Tests all new components and integrations
-"""
-
-import unittest
-import json
-import tempfile
-import os
-import shutil
-from datetime import datetime, timedelta
-from unittest.mock import Mock, patch, MagicMock
-import sys
-import threading
-import time
-
-# Add the api directory to the path
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
-
-from base_service_manager import BaseServiceManager
-from config_manager import ConfigManager
-from service_bus import ServiceBus, EventType, Event
-from log_manager import LogManager, LogLevel
-from network_manager import NetworkManager
-from enhanced_cli import APIClient, ConfigManager as CLIConfigManager, EnhancedCLI
-
-class TestBaseServiceManager(unittest.TestCase):
- """Test the base service manager functionality"""
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp()
- self.data_dir = os.path.join(self.temp_dir, 'data')
- self.config_dir = os.path.join(self.temp_dir, 'config')
- os.makedirs(self.data_dir, exist_ok=True)
- os.makedirs(self.config_dir, exist_ok=True)
-
- # Create a concrete implementation for testing
- class TestServiceManager(BaseServiceManager):
- def get_status(self):
- return {'running': True, 'status': 'online'}
-
- def test_connectivity(self):
- return {'success': True, 'message': 'Connected'}
-
- self.service_manager = TestServiceManager('test_service', self.data_dir, self.config_dir)
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
-
- def test_initialization(self):
- """Test service manager initialization"""
- self.assertEqual(self.service_manager.service_name, 'test_service')
- self.assertEqual(self.service_manager.data_dir, self.data_dir)
- self.assertEqual(self.service_manager.config_dir, self.config_dir)
- self.assertTrue(os.path.exists(self.data_dir))
- self.assertTrue(os.path.exists(self.config_dir))
-
- def test_get_status(self):
- """Test get_status method"""
- status = self.service_manager.get_status()
- self.assertEqual(status['running'], True)
- self.assertEqual(status['status'], 'online')
-
- def test_test_connectivity(self):
- """Test test_connectivity method"""
- connectivity = self.service_manager.test_connectivity()
- self.assertEqual(connectivity['success'], True)
- self.assertEqual(connectivity['message'], 'Connected')
-
- def test_get_logs(self):
- """Test get_logs method"""
- # Create a test log file
- log_file = os.path.join(self.data_dir, 'test_service.log')
- with open(log_file, 'w') as f:
- f.write("Test log line 1\n")
- f.write("Test log line 2\n")
-
- logs = self.service_manager.get_logs(lines=2)
- self.assertEqual(len(logs), 2)
- self.assertIn("Test log line 1", logs[0])
- self.assertIn("Test log line 2", logs[1])
-
- def test_get_config(self):
- """Test get_config method"""
- # Create a test config file
- config_file = os.path.join(self.config_dir, 'test_service.json')
- test_config = {'key': 'value', 'number': 42}
- with open(config_file, 'w') as f:
- json.dump(test_config, f)
-
- config = self.service_manager.get_config()
- self.assertEqual(config['key'], 'value')
- self.assertEqual(config['number'], 42)
-
- def test_update_config(self):
- """Test update_config method"""
- test_config = {'new_key': 'new_value', 'number': 100}
- success = self.service_manager.update_config(test_config)
- self.assertTrue(success)
-
- # Verify config was saved
- config = self.service_manager.get_config()
- self.assertEqual(config['new_key'], 'new_value')
- self.assertEqual(config['number'], 100)
-
- def test_validate_config(self):
- """Test validate_config method"""
- test_config = {'key': 'value'}
- validation = self.service_manager.validate_config(test_config)
- self.assertTrue(validation['valid'])
- self.assertEqual(len(validation['errors']), 0)
-
- def test_get_metrics(self):
- """Test get_metrics method"""
- metrics = self.service_manager.get_metrics()
- self.assertEqual(metrics['service'], 'test_service')
- self.assertIn('timestamp', metrics)
- self.assertEqual(metrics['status'], 'unknown')
-
- def test_handle_error(self):
- """Test handle_error method"""
- test_error = ValueError("Test error")
- error_info = self.service_manager.handle_error(test_error, "test_context")
-
- self.assertEqual(error_info['error'], "Test error")
- self.assertEqual(error_info['type'], "ValueError")
- self.assertEqual(error_info['context'], "test_context")
- self.assertEqual(error_info['service'], 'test_service')
- self.assertIn('traceback', error_info)
-
- def test_health_check(self):
- """Test health_check method"""
- health = self.service_manager.health_check()
-
- self.assertEqual(health['service'], 'test_service')
- self.assertIn('timestamp', health)
- self.assertIn('status', health)
- self.assertIn('connectivity', health)
- self.assertIn('metrics', health)
- self.assertIn('healthy', health)
- self.assertTrue(health['healthy'])
-
-class TestConfigManager(unittest.TestCase):
- """Test the configuration manager functionality"""
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp()
- self.config_dir = os.path.join(self.temp_dir, 'config')
- self.data_dir = os.path.join(self.temp_dir, 'data')
- os.makedirs(self.config_dir, exist_ok=True)
- os.makedirs(self.data_dir, exist_ok=True)
-
- self.config_file = os.path.join(self.config_dir, 'cell_config.json')
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] TestConfigManager.setUp: self.config_file = {self.config_file}")
- # Ensure the config file exists and is a valid JSON file
- if not os.path.exists(self.config_file):
- with open(self.config_file, 'w') as f:
- json.dump({}, f)
- self.config_manager = ConfigManager(self.config_file, self.data_dir)
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
- if os.path.exists(self.config_file):
- os.remove(self.config_file)
-
- def test_initialization(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_initialization: self.config_file = {self.config_file}")
- """Test config manager initialization"""
- self.assertTrue(os.path.exists(self.config_dir))
- self.assertTrue(os.path.exists(self.data_dir))
- self.assertTrue(os.path.exists(self.config_manager.backup_dir))
- self.assertIsNotNone(self.config_manager.service_schemas)
-
- def test_get_service_config(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_get_service_config: self.config_file = {self.config_file}")
- """Test getting service configuration"""
- # Test with non-existent service
- with self.assertRaises(ValueError):
- self.config_manager.get_service_config('nonexistent_service')
-
- # Test with valid service
- config = self.config_manager.get_service_config('network')
- self.assertEqual(config, {})
-
- def test_update_service_config(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_update_service_config: self.config_file = {self.config_file}")
- """Test updating service configuration"""
- test_config = {
- 'dns_port': 53,
- 'dhcp_range': '10.0.0.100-10.0.0.200',
- 'ntp_servers': ['pool.ntp.org']
- }
-
- success = self.config_manager.update_service_config('network', test_config)
- self.assertTrue(success)
-
- # Verify config was saved
- config = self.config_manager.get_service_config('network')
- self.assertEqual(config['dns_port'], 53)
- self.assertEqual(config['dhcp_range'], '10.0.0.100-10.0.0.200')
- self.assertEqual(config['ntp_servers'], ['pool.ntp.org'])
-
- def test_validate_config(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_validate_config: self.config_file = {self.config_file}")
- """Test configuration validation"""
- # Test valid config
- valid_config = {
- 'dns_port': 53,
- 'dhcp_range': '10.0.0.100-10.0.0.200',
- 'ntp_servers': ['pool.ntp.org']
- }
- validation = self.config_manager.validate_config('network', valid_config)
- self.assertTrue(validation['valid'])
- self.assertEqual(len(validation['errors']), 0)
-
- # Test invalid config (missing required field)
- invalid_config = {
- 'dns_port': 53
- # Missing dhcp_range and ntp_servers
- }
- validation = self.config_manager.validate_config('network', invalid_config)
- self.assertFalse(validation['valid'])
- self.assertGreater(len(validation['errors']), 0)
-
- # Test invalid config (wrong type)
- invalid_type_config = {
- 'dns_port': 'not_a_number',
- 'dhcp_range': '10.0.0.100-10.0.0.200',
- 'ntp_servers': ['pool.ntp.org']
- }
- validation = self.config_manager.validate_config('network', invalid_type_config)
- self.assertFalse(validation['valid'])
- self.assertGreater(len(validation['errors']), 0)
-
- def test_backup_and_restore(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_backup_and_restore: self.config_file = {self.config_file}")
- """Test configuration backup and restore"""
- # Create some test configurations
- test_configs = {
- 'network': {'dns_port': 53, 'dhcp_range': '10.0.0.100-10.0.0.200'},
- 'wireguard': {'port': 51820, 'private_key': 'test_key'}
- }
-
- for service, config in test_configs.items():
- self.config_manager.update_service_config(service, config)
-
- # Create backup
- backup_id = self.config_manager.backup_config()
- self.assertIsNotNone(backup_id)
-
- # List backups
- backups = self.config_manager.list_backups()
- self.assertEqual(len(backups), 1)
- self.assertEqual(backups[0]['backup_id'], backup_id)
-
- # Modify config
- self.config_manager.update_service_config('network', {'dns_port': 5353})
-
- # Restore backup
- success = self.config_manager.restore_config(backup_id)
- self.assertTrue(success)
-
- # Verify restoration
- config = self.config_manager.get_service_config('network')
- self.assertEqual(config['dns_port'], 53) # Should be restored value
-
- def test_export_import_config(self):
- assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
- print(f"[DEBUG] test_export_import_config: self.config_file = {self.config_file}")
- """Test configuration export and import"""
- # Create test configurations
- test_configs = {
- 'network': {'dns_port': 53, 'dhcp_range': '10.0.0.100-10.0.0.200'},
- 'wireguard': {'port': 51820, 'private_key': 'test_key'}
- }
-
- for service, config in test_configs.items():
- self.config_manager.update_service_config(service, config)
-
- # Export configuration
- exported_json = self.config_manager.export_config('json')
- exported_yaml = self.config_manager.export_config('yaml')
-
- self.assertIsInstance(exported_json, str)
- self.assertIsInstance(exported_yaml, str)
-
- # Clear unified config file
- if os.path.exists(self.config_file):
- os.remove(self.config_file)
-
- # Import configuration
- success = self.config_manager.import_config(exported_json, 'json')
- self.assertTrue(success)
-
- # Verify import
- for service, expected_config in test_configs.items():
- config = self.config_manager.get_service_config(service)
- for key, value in expected_config.items():
- self.assertEqual(config[key], value)
-
-class TestServiceBus(unittest.TestCase):
- """Test the service bus functionality"""
-
- def setUp(self):
- self.service_bus = ServiceBus()
-
- def test_initialization(self):
- """Test service bus initialization"""
- self.assertFalse(self.service_bus.running)
- self.assertEqual(len(self.service_bus.service_registry), 0)
- self.assertEqual(len(self.service_bus.event_handlers), 0)
-
- def test_start_stop(self):
- """Test service bus start and stop"""
- self.service_bus.start()
- self.assertTrue(self.service_bus.running)
- self.assertIsNotNone(self.service_bus.event_loop_thread)
-
- self.service_bus.stop()
- self.assertFalse(self.service_bus.running)
-
- def test_register_unregister_service(self):
- """Test service registration and unregistration"""
- mock_service = Mock()
- mock_service.get_status.return_value = {'running': True}
-
- # Register service
- self.service_bus.register_service('test_service', mock_service)
- self.assertIn('test_service', self.service_bus.service_registry)
- self.assertEqual(self.service_bus.service_registry['test_service'], mock_service)
-
- # Unregister service
- self.service_bus.unregister_service('test_service')
- self.assertNotIn('test_service', self.service_bus.service_registry)
-
- def test_publish_subscribe_events(self):
- """Test event publishing and subscription"""
- events_received = []
-
- def event_handler(event):
- events_received.append(event)
-
- # Subscribe to events
- self.service_bus.subscribe_to_event(EventType.SERVICE_STARTED, event_handler)
-
- # Start service bus
- self.service_bus.start()
-
- # Publish event
- test_data = {'service': 'test_service', 'timestamp': datetime.utcnow().isoformat()}
- self.service_bus.publish_event(EventType.SERVICE_STARTED, 'test_service', test_data)
-
- # Wait for event processing
- time.sleep(0.1)
-
- # Check if event was received
- self.assertEqual(len(events_received), 1)
- self.assertEqual(events_received[0].event_type, EventType.SERVICE_STARTED)
- self.assertEqual(events_received[0].source, 'test_service')
- self.assertEqual(events_received[0].data, test_data)
-
- self.service_bus.stop()
-
- def test_call_service(self):
- """Test service method calling"""
- mock_service = Mock(spec=[])
- mock_service.test_method.return_value = 'test_result'
-
- self.service_bus.register_service('test_service', mock_service)
-
- # Call service method
- result = self.service_bus.call_service('test_service', 'test_method', arg1='value1')
- self.assertEqual(result, 'test_result')
- mock_service.test_method.assert_called_once_with(arg1='value1')
-
- # Test calling non-existent service
- with self.assertRaises(ValueError):
- self.service_bus.call_service('nonexistent_service', 'test_method')
-
- # Test calling non-existent method
- with self.assertRaises(ValueError):
- self.service_bus.call_service('test_service', 'nonexistent_method')
-
- def test_service_orchestration(self):
- """Test service orchestration"""
- mock_service = Mock()
- mock_service.start = Mock()
- mock_service.stop = Mock()
-
- self.service_bus.register_service('test_service', mock_service)
-
- # Test service start orchestration
- success = self.service_bus.orchestrate_service_start('test_service')
- self.assertTrue(success)
- mock_service.start.assert_called_once()
-
- # Test service stop orchestration
- success = self.service_bus.orchestrate_service_stop('test_service')
- self.assertTrue(success)
- mock_service.stop.assert_called_once()
-
- # Test service restart orchestration
- success = self.service_bus.orchestrate_service_restart('test_service')
- self.assertTrue(success)
- self.assertEqual(mock_service.start.call_count, 2)
- self.assertEqual(mock_service.stop.call_count, 2)
-
- def test_event_history(self):
- """Test event history functionality"""
- self.service_bus.start()
-
- # Publish some events
- for i in range(5):
- self.service_bus.publish_event(EventType.SERVICE_STARTED, f'service_{i}', {'index': i})
-
- # Wait for event processing
- time.sleep(0.1)
-
- # Get event history
- events = self.service_bus.get_event_history(limit=3)
- self.assertEqual(len(events), 3)
-
- # Test filtering by event type
- started_events = self.service_bus.get_event_history(EventType.SERVICE_STARTED, limit=2)
- self.assertEqual(len(started_events), 2)
- for event in started_events:
- self.assertEqual(event.event_type, EventType.SERVICE_STARTED)
-
- # Test filtering by source
- service_0_events = self.service_bus.get_event_history(source='service_0')
- self.assertEqual(len(service_0_events), 1)
- self.assertEqual(service_0_events[0].source, 'service_0')
-
- self.service_bus.stop()
-
-class TestLogManager(unittest.TestCase):
- """Test the log manager functionality"""
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp()
- self.log_dir = os.path.join(self.temp_dir, 'logs')
- os.makedirs(self.log_dir, exist_ok=True)
-
- self.log_manager = LogManager(self.log_dir)
-
- def tearDown(self):
- self.log_manager.stop()
- shutil.rmtree(self.temp_dir)
-
- def test_initialization(self):
- """Test log manager initialization"""
- self.assertTrue(os.path.exists(self.log_dir))
- self.assertIsNotNone(self.log_manager.formatters)
- self.assertIsNotNone(self.log_manager.handlers)
- self.assertTrue(self.log_manager.running)
-
- def test_add_service_logger(self):
- """Test adding service loggers"""
- config = {'level': 'INFO', 'formatter': 'json', 'console': False}
- self.log_manager.add_service_logger('test_service', config)
-
- self.assertIn('test_service', self.log_manager.service_loggers)
- self.assertIn('test_service', self.log_manager.handlers)
-
- def test_get_service_logs(self):
- """Test getting service logs"""
- # Create a test log file
- log_file = os.path.join(self.log_dir, 'test_service.log')
- with open(log_file, 'w') as f:
- f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test log 1"}\n')
- f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Test log 2"}\n')
- f.write('{"timestamp": "2024-01-01T10:02:00Z", "level": "INFO", "message": "Test log 3"}\n')
-
- # Test getting all logs
- logs = self.log_manager.get_service_logs('test_service', lines=3)
- self.assertEqual(len(logs), 3)
-
- # Test filtering by level
- error_logs = self.log_manager.get_service_logs('test_service', level='ERROR', lines=10)
- self.assertEqual(len(error_logs), 1)
- self.assertIn('ERROR', error_logs[0])
-
- def test_search_logs(self):
- """Test log search functionality"""
- # Create test log files
- services = ['service1', 'service2']
- for service in services:
- log_file = os.path.join(self.log_dir, f'{service}.log')
- with open(log_file, 'w') as f:
- f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test message for ' + service + '"}\n')
- f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Error in ' + service + '"}\n')
-
- # Test search across all services
- results = self.log_manager.search_logs('Test message')
- self.assertEqual(len(results), 2)
-
- # Test search with service filter
- results = self.log_manager.search_logs('Error', services=['service1'])
- self.assertEqual(len(results), 1)
- self.assertIn('service1', results[0]['service'])
-
- # Test search with level filter
- results = self.log_manager.search_logs('', level='ERROR')
- self.assertEqual(len(results), 2)
- for result in results:
- self.assertEqual(result['level'], 'ERROR')
-
- def test_export_logs(self):
- """Test log export functionality"""
- # Create test log file
- log_file = os.path.join(self.log_dir, 'test_service.log')
- with open(log_file, 'w') as f:
- f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test log"}\n')
-
- # Test JSON export
- json_export = self.log_manager.export_logs('json')
- self.assertIsInstance(json_export, str)
- self.assertIn('Test log', json_export)
-
- # Test CSV export
- csv_export = self.log_manager.export_logs('csv')
- self.assertIsInstance(csv_export, str)
- self.assertIn('Test log', csv_export)
-
- # Test text export
- text_export = self.log_manager.export_logs('text')
- self.assertIsInstance(text_export, str)
- self.assertIn('Test log', text_export)
-
- def test_log_statistics(self):
- """Test log statistics functionality"""
- # Create test log file
- log_file = os.path.join(self.log_dir, 'test_service.log')
- with open(log_file, 'w') as f:
- f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Info log"}\n')
- f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Error log"}\n')
- f.write('{"timestamp": "2024-01-01T10:02:00Z", "level": "WARNING", "message": "Warning log"}\n')
-
- # Get statistics
- stats = self.log_manager.get_log_statistics('test_service')
- self.assertIn('test_service', stats)
- self.assertEqual(stats['test_service']['total_entries'], 3)
- self.assertIn('level_counts', stats['test_service'])
- self.assertEqual(stats['test_service']['level_counts']['INFO'], 1)
- self.assertEqual(stats['test_service']['level_counts']['ERROR'], 1)
- self.assertEqual(stats['test_service']['level_counts']['WARNING'], 1)
-
-class TestEnhancedCLI(unittest.TestCase):
- """Test the enhanced CLI functionality"""
-
- def setUp(self):
- self.cli = EnhancedCLI()
-
- def test_api_client(self):
- """Test API client functionality"""
- client = APIClient()
- self.assertEqual(client.base_url, "http://localhost:3000/api")
- self.assertIsNotNone(client.session)
-
- def test_cli_config_manager(self):
- """Test CLI configuration manager"""
- config_manager = CLIConfigManager()
- self.assertIsNotNone(config_manager.config)
-
- # Test get/set
- config_manager.set('test_key', 'test_value')
- self.assertEqual(config_manager.get('test_key'), 'test_value')
-
- # Test export/import
- exported = config_manager.export_config('json')
- self.assertIsInstance(exported, str)
- self.assertIn('test_key', exported)
-
- def test_cli_commands(self):
- """Test CLI commands"""
- # Test status command
- with patch.object(self.cli.api_client, 'request') as mock_request:
- mock_request.return_value = {
- 'cell_name': 'test-cell',
- 'domain': 'test.local',
- 'peers_count': 2,
- 'services': {'network': {'running': True}}
- }
-
- # Capture print output
- from io import StringIO
- import sys
- old_stdout = sys.stdout
- sys.stdout = StringIO()
-
- try:
- self.cli.do_status("")
- output = sys.stdout.getvalue()
- self.assertIn('test-cell', output)
- self.assertIn('test.local', output)
- finally:
- sys.stdout = old_stdout
-
-class TestNetworkManagerIntegration(unittest.TestCase):
- """Test NetworkManager integration with BaseServiceManager"""
-
- def setUp(self):
- self.temp_dir = tempfile.mkdtemp()
- self.data_dir = os.path.join(self.temp_dir, 'data')
- self.config_dir = os.path.join(self.temp_dir, 'config')
- os.makedirs(self.data_dir, exist_ok=True)
- os.makedirs(self.config_dir, exist_ok=True)
-
- self.network_manager = NetworkManager(self.data_dir, self.config_dir)
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
-
- def test_inheritance(self):
- """Test that NetworkManager inherits from BaseServiceManager"""
- self.assertIsInstance(self.network_manager, BaseServiceManager)
- self.assertEqual(self.network_manager.service_name, 'network')
-
- def test_get_status(self):
- """Test NetworkManager get_status method"""
- status = self.network_manager.get_status()
- self.assertIn('timestamp', status)
- self.assertIn('network', status)
-
- def test_test_connectivity(self):
- """Test NetworkManager test_connectivity method"""
- connectivity = self.network_manager.test_connectivity()
- self.assertIn('timestamp', connectivity)
- self.assertIn('network', connectivity)
-
-def run_tests():
- """Run all tests"""
- # Create test suite
- test_suite = unittest.TestSuite()
-
- # Add test classes
- test_classes = [
- TestBaseServiceManager,
- TestConfigManager,
- TestServiceBus,
- TestLogManager,
- TestEnhancedCLI,
- TestNetworkManagerIntegration
- ]
-
- for test_class in test_classes:
- tests = unittest.TestLoader().loadTestsFromTestCase(test_class)
- test_suite.addTests(tests)
-
- # Run tests
- runner = unittest.TextTestRunner(verbosity=2)
- result = runner.run(test_suite)
-
- # Print summary
- print(f"\n{'='*50}")
- print(f"Test Summary:")
- print(f"Tests run: {result.testsRun}")
- print(f"Failures: {len(result.failures)}")
- print(f"Errors: {len(result.errors)}")
- print(f"Success rate: {((result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100):.1f}%")
- print(f"{'='*50}")
-
- return result.wasSuccessful()
-
-if __name__ == '__main__':
- success = run_tests()
+#!/usr/bin/env python3
+"""
+Comprehensive Test Suite for Enhanced Personal Internet Cell API
+Tests all new components and integrations
+"""
+
+import unittest
+import json
+import tempfile
+import os
+import shutil
+from datetime import datetime, timedelta
+from unittest.mock import Mock, patch, MagicMock
+import sys
+import threading
+import time
+
+# Add the api directory to the path
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from base_service_manager import BaseServiceManager
+from config_manager import ConfigManager
+from service_bus import ServiceBus, EventType, Event
+from log_manager import LogManager, LogLevel
+from network_manager import NetworkManager
+from enhanced_cli import APIClient, ConfigManager as CLIConfigManager, EnhancedCLI
+
+class TestBaseServiceManager(unittest.TestCase):
+ """Test the base service manager functionality"""
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp()
+ self.data_dir = os.path.join(self.temp_dir, 'data')
+ self.config_dir = os.path.join(self.temp_dir, 'config')
+ os.makedirs(self.data_dir, exist_ok=True)
+ os.makedirs(self.config_dir, exist_ok=True)
+
+ # Create a concrete implementation for testing
+ class TestServiceManager(BaseServiceManager):
+ def get_status(self):
+ return {'running': True, 'status': 'online'}
+
+ def test_connectivity(self):
+ return {'success': True, 'message': 'Connected'}
+
+ self.service_manager = TestServiceManager('test_service', self.data_dir, self.config_dir)
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+ def test_initialization(self):
+ """Test service manager initialization"""
+ self.assertEqual(self.service_manager.service_name, 'test_service')
+ self.assertEqual(self.service_manager.data_dir, self.data_dir)
+ self.assertEqual(self.service_manager.config_dir, self.config_dir)
+ self.assertTrue(os.path.exists(self.data_dir))
+ self.assertTrue(os.path.exists(self.config_dir))
+
+ def test_get_status(self):
+ """Test get_status method"""
+ status = self.service_manager.get_status()
+ self.assertEqual(status['running'], True)
+ self.assertEqual(status['status'], 'online')
+
+ def test_test_connectivity(self):
+ """Test test_connectivity method"""
+ connectivity = self.service_manager.test_connectivity()
+ self.assertEqual(connectivity['success'], True)
+ self.assertEqual(connectivity['message'], 'Connected')
+
+ def test_get_logs(self):
+ """Test get_logs method"""
+ # Create a test log file
+ log_file = os.path.join(self.data_dir, 'test_service.log')
+ with open(log_file, 'w') as f:
+ f.write("Test log line 1\n")
+ f.write("Test log line 2\n")
+
+ logs = self.service_manager.get_logs(lines=2)
+ self.assertEqual(len(logs), 2)
+ self.assertIn("Test log line 1", logs[0])
+ self.assertIn("Test log line 2", logs[1])
+
+ def test_get_config(self):
+ """Test get_config method"""
+ # Create a test config file
+ config_file = os.path.join(self.config_dir, 'test_service.json')
+ test_config = {'key': 'value', 'number': 42}
+ with open(config_file, 'w') as f:
+ json.dump(test_config, f)
+
+ config = self.service_manager.get_config()
+ self.assertEqual(config['key'], 'value')
+ self.assertEqual(config['number'], 42)
+
+ def test_update_config(self):
+ """Test update_config method"""
+ test_config = {'new_key': 'new_value', 'number': 100}
+ success = self.service_manager.update_config(test_config)
+ self.assertTrue(success)
+
+ # Verify config was saved
+ config = self.service_manager.get_config()
+ self.assertEqual(config['new_key'], 'new_value')
+ self.assertEqual(config['number'], 100)
+
+ def test_validate_config(self):
+ """Test validate_config method"""
+ test_config = {'key': 'value'}
+ validation = self.service_manager.validate_config(test_config)
+ self.assertTrue(validation['valid'])
+ self.assertEqual(len(validation['errors']), 0)
+
+ def test_get_metrics(self):
+ """Test get_metrics method"""
+ metrics = self.service_manager.get_metrics()
+ self.assertEqual(metrics['service'], 'test_service')
+ self.assertIn('timestamp', metrics)
+ self.assertEqual(metrics['status'], 'unknown')
+
+ def test_handle_error(self):
+ """Test handle_error method"""
+ test_error = ValueError("Test error")
+ error_info = self.service_manager.handle_error(test_error, "test_context")
+
+ self.assertEqual(error_info['error'], "Test error")
+ self.assertEqual(error_info['type'], "ValueError")
+ self.assertEqual(error_info['context'], "test_context")
+ self.assertEqual(error_info['service'], 'test_service')
+ self.assertIn('traceback', error_info)
+
+ def test_health_check(self):
+ """Test health_check method"""
+ health = self.service_manager.health_check()
+
+ self.assertEqual(health['service'], 'test_service')
+ self.assertIn('timestamp', health)
+ self.assertIn('status', health)
+ self.assertIn('connectivity', health)
+ self.assertIn('metrics', health)
+ self.assertIn('healthy', health)
+ self.assertTrue(health['healthy'])
+
+class TestConfigManager(unittest.TestCase):
+ """Test the configuration manager functionality"""
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp()
+ self.config_dir = os.path.join(self.temp_dir, 'config')
+ self.data_dir = os.path.join(self.temp_dir, 'data')
+ os.makedirs(self.config_dir, exist_ok=True)
+ os.makedirs(self.data_dir, exist_ok=True)
+
+ self.config_file = os.path.join(self.config_dir, 'cell_config.json')
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] TestConfigManager.setUp: self.config_file = {self.config_file}")
+ # Ensure the config file exists and is a valid JSON file
+ if not os.path.exists(self.config_file):
+ with open(self.config_file, 'w') as f:
+ json.dump({}, f)
+ self.config_manager = ConfigManager(self.config_file, self.data_dir)
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+ if os.path.exists(self.config_file):
+ os.remove(self.config_file)
+
+ def test_initialization(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_initialization: self.config_file = {self.config_file}")
+ """Test config manager initialization"""
+ self.assertTrue(os.path.exists(self.config_dir))
+ self.assertTrue(os.path.exists(self.data_dir))
+ self.assertTrue(os.path.exists(self.config_manager.backup_dir))
+ self.assertIsNotNone(self.config_manager.service_schemas)
+
+ def test_get_service_config(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_get_service_config: self.config_file = {self.config_file}")
+ """Test getting service configuration"""
+ # Test with non-existent service
+ with self.assertRaises(ValueError):
+ self.config_manager.get_service_config('nonexistent_service')
+
+ # Test with valid service
+ config = self.config_manager.get_service_config('network')
+ self.assertEqual(config, {})
+
+ def test_update_service_config(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_update_service_config: self.config_file = {self.config_file}")
+ """Test updating service configuration"""
+ test_config = {
+ 'dns_port': 53,
+ 'dhcp_range': '10.0.0.100-10.0.0.200',
+ 'ntp_servers': ['pool.ntp.org']
+ }
+
+ success = self.config_manager.update_service_config('network', test_config)
+ self.assertTrue(success)
+
+ # Verify config was saved
+ config = self.config_manager.get_service_config('network')
+ self.assertEqual(config['dns_port'], 53)
+ self.assertEqual(config['dhcp_range'], '10.0.0.100-10.0.0.200')
+ self.assertEqual(config['ntp_servers'], ['pool.ntp.org'])
+
+ def test_validate_config(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_validate_config: self.config_file = {self.config_file}")
+ """Test configuration validation"""
+ # Test valid config
+ valid_config = {
+ 'dns_port': 53,
+ 'dhcp_range': '10.0.0.100-10.0.0.200',
+ 'ntp_servers': ['pool.ntp.org']
+ }
+ validation = self.config_manager.validate_config('network', valid_config)
+ self.assertTrue(validation['valid'])
+ self.assertEqual(len(validation['errors']), 0)
+
+ # Test invalid config (missing required field)
+ invalid_config = {
+ 'dns_port': 53
+ # Missing dhcp_range and ntp_servers
+ }
+ validation = self.config_manager.validate_config('network', invalid_config)
+ self.assertFalse(validation['valid'])
+ self.assertGreater(len(validation['errors']), 0)
+
+ # Test invalid config (wrong type)
+ invalid_type_config = {
+ 'dns_port': 'not_a_number',
+ 'dhcp_range': '10.0.0.100-10.0.0.200',
+ 'ntp_servers': ['pool.ntp.org']
+ }
+ validation = self.config_manager.validate_config('network', invalid_type_config)
+ self.assertFalse(validation['valid'])
+ self.assertGreater(len(validation['errors']), 0)
+
+ def test_backup_and_restore(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_backup_and_restore: self.config_file = {self.config_file}")
+ """Test configuration backup and restore"""
+ # Create some test configurations
+ test_configs = {
+ 'network': {'dns_port': 53, 'dhcp_range': '10.0.0.100-10.0.0.200', 'ntp_servers': ['pool.ntp.org']},
+ 'wireguard': {'port': 51820, 'private_key': 'test_key', 'address': '10.0.0.1/24'}
+ }
+
+ for service, config in test_configs.items():
+ self.config_manager.update_service_config(service, config)
+
+ # Create backup
+ backup_id = self.config_manager.backup_config()
+ self.assertIsNotNone(backup_id)
+
+ # List backups
+ backups = self.config_manager.list_backups()
+ self.assertEqual(len(backups), 1)
+ self.assertEqual(backups[0]['backup_id'], backup_id)
+
+ # Modify config
+ self.config_manager.update_service_config('network', {'dns_port': 5353})
+
+ # Restore backup
+ success = self.config_manager.restore_config(backup_id)
+ self.assertTrue(success)
+
+ # Verify restoration
+ config = self.config_manager.get_service_config('network')
+ self.assertEqual(config['dns_port'], 53) # Should be restored value
+
+ def test_export_import_config(self):
+ assert not os.path.isdir(self.config_file), f"self.config_file is a directory: {self.config_file}"
+ print(f"[DEBUG] test_export_import_config: self.config_file = {self.config_file}")
+ """Test configuration export and import"""
+ # Create test configurations
+ test_configs = {
+ 'network': {'dns_port': 53, 'dhcp_range': '10.0.0.100-10.0.0.200', 'ntp_servers': ['pool.ntp.org']},
+ 'wireguard': {'port': 51820, 'private_key': 'test_key', 'address': '10.0.0.1/24'}
+ }
+
+ for service, config in test_configs.items():
+ self.config_manager.update_service_config(service, config)
+
+ # Export configuration
+ exported_json = self.config_manager.export_config('json')
+ exported_yaml = self.config_manager.export_config('yaml')
+
+ self.assertIsInstance(exported_json, str)
+ self.assertIsInstance(exported_yaml, str)
+
+ # Clear unified config file
+ if os.path.exists(self.config_file):
+ os.remove(self.config_file)
+
+ # Import configuration
+ success = self.config_manager.import_config(exported_json, 'json')
+ self.assertTrue(success)
+
+ # Verify import
+ for service, expected_config in test_configs.items():
+ config = self.config_manager.get_service_config(service)
+ for key, value in expected_config.items():
+ self.assertEqual(config[key], value)
+
+ # Also verify that required fields are present (even if with default values)
+ schema = self.config_manager.service_schemas[service]
+ for field in schema['required']:
+ self.assertIn(field, config)
+
+class TestServiceBus(unittest.TestCase):
+ """Test the service bus functionality"""
+
+ def setUp(self):
+ self.service_bus = ServiceBus()
+
+ def test_initialization(self):
+ """Test service bus initialization"""
+ self.assertFalse(self.service_bus.running)
+ self.assertEqual(len(self.service_bus.service_registry), 0)
+ self.assertEqual(len(self.service_bus.event_handlers), 0)
+
+ def test_start_stop(self):
+ """Test service bus start and stop"""
+ self.service_bus.start()
+ self.assertTrue(self.service_bus.running)
+ self.assertIsNotNone(self.service_bus.event_loop_thread)
+
+ self.service_bus.stop()
+ self.assertFalse(self.service_bus.running)
+
+ def test_register_unregister_service(self):
+ """Test service registration and unregistration"""
+ mock_service = Mock()
+ mock_service.get_status.return_value = {'running': True}
+
+ # Register service
+ self.service_bus.register_service('test_service', mock_service)
+ self.assertIn('test_service', self.service_bus.service_registry)
+ self.assertEqual(self.service_bus.service_registry['test_service'], mock_service)
+
+ # Unregister service
+ self.service_bus.unregister_service('test_service')
+ self.assertNotIn('test_service', self.service_bus.service_registry)
+
+ def test_publish_subscribe_events(self):
+ """Test event publishing and subscription"""
+ events_received = []
+
+ def event_handler(event):
+ events_received.append(event)
+
+ # Subscribe to events
+ self.service_bus.subscribe_to_event(EventType.SERVICE_STARTED, event_handler)
+
+ # Start service bus
+ self.service_bus.start()
+
+ # Publish event
+ test_data = {'service': 'test_service', 'timestamp': datetime.utcnow().isoformat()}
+ self.service_bus.publish_event(EventType.SERVICE_STARTED, 'test_service', test_data)
+
+ # Wait for event processing
+ time.sleep(0.1)
+
+ # Check if event was received
+ self.assertEqual(len(events_received), 1)
+ self.assertEqual(events_received[0].event_type, EventType.SERVICE_STARTED)
+ self.assertEqual(events_received[0].source, 'test_service')
+ self.assertEqual(events_received[0].data, test_data)
+
+ self.service_bus.stop()
+
+ def test_call_service(self):
+ """Test service method calling"""
+ # Create a real service class instead of Mock
+ class TestService:
+ def test_method(self, arg1=None):
+ return 'test_result'
+
+ test_service = TestService()
+ self.service_bus.register_service('test_service', test_service)
+
+ # Call service method
+ result = self.service_bus.call_service('test_service', 'test_method', arg1='value1')
+ self.assertEqual(result, 'test_result')
+
+ # Test calling non-existent service
+ with self.assertRaises(ValueError):
+ self.service_bus.call_service('nonexistent_service', 'test_method')
+
+ # Test calling non-existent method
+ with self.assertRaises(ValueError):
+ self.service_bus.call_service('test_service', 'nonexistent_method')
+
+ def test_service_orchestration(self):
+ """Test service orchestration"""
+ mock_service = Mock()
+ mock_service.start = Mock()
+ mock_service.stop = Mock()
+
+ self.service_bus.register_service('test_service', mock_service)
+
+ # Test service start orchestration
+ success = self.service_bus.orchestrate_service_start('test_service')
+ self.assertTrue(success)
+ mock_service.start.assert_called_once()
+
+ # Test service stop orchestration
+ success = self.service_bus.orchestrate_service_stop('test_service')
+ self.assertTrue(success)
+ mock_service.stop.assert_called_once()
+
+ # Test service restart orchestration
+ success = self.service_bus.orchestrate_service_restart('test_service')
+ self.assertTrue(success)
+ self.assertEqual(mock_service.start.call_count, 2)
+ self.assertEqual(mock_service.stop.call_count, 2)
+
+ def test_event_history(self):
+ """Test event history functionality"""
+ self.service_bus.start()
+
+ # Publish some events
+ for i in range(5):
+ self.service_bus.publish_event(EventType.SERVICE_STARTED, f'service_{i}', {'index': i})
+
+ # Wait for event processing
+ time.sleep(0.1)
+
+ # Get event history
+ events = self.service_bus.get_event_history(limit=3)
+ self.assertEqual(len(events), 3)
+
+ # Test filtering by event type
+ started_events = self.service_bus.get_event_history(EventType.SERVICE_STARTED, limit=2)
+ self.assertEqual(len(started_events), 2)
+ for event in started_events:
+ self.assertEqual(event.event_type, EventType.SERVICE_STARTED)
+
+ # Test filtering by source
+ service_0_events = self.service_bus.get_event_history(source='service_0')
+ self.assertEqual(len(service_0_events), 1)
+ self.assertEqual(service_0_events[0].source, 'service_0')
+
+ self.service_bus.stop()
+
+class TestLogManager(unittest.TestCase):
+ """Test the log manager functionality"""
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp()
+ self.log_dir = os.path.join(self.temp_dir, 'logs')
+ os.makedirs(self.log_dir, exist_ok=True)
+
+ self.log_manager = LogManager(self.log_dir)
+
+ def tearDown(self):
+ self.log_manager.stop()
+ shutil.rmtree(self.temp_dir)
+
+ def test_initialization(self):
+ """Test log manager initialization"""
+ self.assertTrue(os.path.exists(self.log_dir))
+ self.assertIsNotNone(self.log_manager.formatters)
+ self.assertIsNotNone(self.log_manager.handlers)
+ self.assertTrue(self.log_manager.running)
+
+ def test_add_service_logger(self):
+ """Test adding service loggers"""
+ config = {'level': 'INFO', 'formatter': 'json', 'console': False}
+ self.log_manager.add_service_logger('test_service', config)
+
+ self.assertIn('test_service', self.log_manager.service_loggers)
+ self.assertIn('test_service', self.log_manager.handlers)
+
+ def test_get_service_logs(self):
+ """Test getting service logs"""
+ # Add service logger first
+ config = {'level': 'INFO', 'formatter': 'json', 'console': False}
+ self.log_manager.add_service_logger('test_service', config)
+
+ # Create a test log file in the correct location
+ log_file = self.log_manager.log_dir / 'test_service.log'
+ with open(log_file, 'w') as f:
+ f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test log 1"}\n')
+ f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Test log 2"}\n')
+ f.write('{"timestamp": "2024-01-01T10:02:00Z", "level": "INFO", "message": "Test log 3"}\n')
+
+ # Test getting all logs
+ logs = self.log_manager.get_service_logs_parsed('test_service', level='ALL', lines=3)
+ self.assertEqual(len(logs), 3)
+
+ # Test filtering by level
+ error_logs = self.log_manager.get_service_logs_parsed('test_service', level='ERROR', lines=10)
+ self.assertEqual(len(error_logs), 1)
+ self.assertEqual(error_logs[0]['level'], 'ERROR')
+
+ def test_search_logs(self):
+ """Test log search functionality"""
+ # Add service loggers first
+ config = {'level': 'INFO', 'formatter': 'json', 'console': False}
+ services = ['service1', 'service2']
+ for service in services:
+ self.log_manager.add_service_logger(service, config)
+
+ # Create test log files in the correct location
+ for service in services:
+ log_file = self.log_manager.log_dir / f'{service}.log'
+ with open(log_file, 'w') as f:
+ f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test message for ' + service + '"}\n')
+ f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Error in ' + service + '"}\n')
+
+ # Test search across all services
+ results = self.log_manager.search_logs('Test message')
+ self.assertEqual(len(results), 2)
+
+ # Test search with service filter
+ results = self.log_manager.search_logs('Error', services=['service1'])
+ self.assertEqual(len(results), 1)
+ self.assertIn('service1', results[0]['service'])
+
+ # Test search with level filter
+ results = self.log_manager.search_logs('', level='ERROR')
+ self.assertEqual(len(results), 2)
+ for result in results:
+ self.assertEqual(result['level'], 'ERROR')
+
+ def test_export_logs(self):
+ """Test log export functionality"""
+ # Add service logger first
+ config = {'level': 'INFO', 'formatter': 'json', 'console': False}
+ self.log_manager.add_service_logger('test_service', config)
+
+ # Create test log file in the correct location
+ log_file = self.log_manager.log_dir / 'test_service.log'
+ with open(log_file, 'w') as f:
+ f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Test log"}\n')
+
+ # Test JSON export
+ json_export = self.log_manager.export_logs('json')
+ self.assertIsInstance(json_export, str)
+ self.assertIn('Test log', json_export)
+
+ # Test CSV export
+ csv_export = self.log_manager.export_logs('csv')
+ self.assertIsInstance(csv_export, str)
+ self.assertIn('Test log', csv_export)
+
+ # Test text export
+ text_export = self.log_manager.export_logs('text')
+ self.assertIsInstance(text_export, str)
+ self.assertIn('Test log', text_export)
+
+ def test_log_statistics(self):
+ """Test log statistics functionality"""
+ # Create test log file
+ log_file = os.path.join(self.log_dir, 'test_service.log')
+ with open(log_file, 'w') as f:
+ f.write('{"timestamp": "2024-01-01T10:00:00Z", "level": "INFO", "message": "Info log"}\n')
+ f.write('{"timestamp": "2024-01-01T10:01:00Z", "level": "ERROR", "message": "Error log"}\n')
+ f.write('{"timestamp": "2024-01-01T10:02:00Z", "level": "WARNING", "message": "Warning log"}\n')
+
+ # Get statistics
+ stats = self.log_manager.get_log_statistics('test_service')
+ self.assertIn('test_service', stats)
+ self.assertEqual(stats['test_service']['total_entries'], 3)
+ self.assertIn('level_counts', stats['test_service'])
+ self.assertEqual(stats['test_service']['level_counts']['INFO'], 1)
+ self.assertEqual(stats['test_service']['level_counts']['ERROR'], 1)
+ self.assertEqual(stats['test_service']['level_counts']['WARNING'], 1)
+
+class TestEnhancedCLI(unittest.TestCase):
+ """Test the enhanced CLI functionality"""
+
+ def setUp(self):
+ self.cli = EnhancedCLI()
+
+ def test_api_client(self):
+ """Test API client functionality"""
+ client = APIClient()
+ self.assertEqual(client.base_url, "http://localhost:3000/api")
+ self.assertIsNotNone(client.session)
+
+ def test_cli_config_manager(self):
+ """Test CLI configuration manager"""
+ config_manager = CLIConfigManager()
+ self.assertIsNotNone(config_manager.config)
+
+ # Test get/set
+ config_manager.set('test_key', 'test_value')
+ self.assertEqual(config_manager.get('test_key'), 'test_value')
+
+ # Test export/import
+ exported = config_manager.export_config('json')
+ self.assertIsInstance(exported, str)
+ self.assertIn('test_key', exported)
+
+ def test_cli_commands(self):
+ """Test CLI commands"""
+ # Test status command
+ with patch.object(self.cli.api_client, 'request') as mock_request:
+ mock_request.return_value = {
+ 'cell_name': 'test-cell',
+ 'domain': 'test.local',
+ 'peers_count': 2,
+ 'services': {'network': {'running': True}}
+ }
+
+ # Capture print output
+ from io import StringIO
+ import sys
+ old_stdout = sys.stdout
+ sys.stdout = StringIO()
+
+ try:
+ self.cli.do_status("")
+ output = sys.stdout.getvalue()
+ self.assertIn('test-cell', output)
+ self.assertIn('test.local', output)
+ finally:
+ sys.stdout = old_stdout
+
+class TestNetworkManagerIntegration(unittest.TestCase):
+ """Test NetworkManager integration with BaseServiceManager"""
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp()
+ self.data_dir = os.path.join(self.temp_dir, 'data')
+ self.config_dir = os.path.join(self.temp_dir, 'config')
+ os.makedirs(self.data_dir, exist_ok=True)
+ os.makedirs(self.config_dir, exist_ok=True)
+
+ self.network_manager = NetworkManager(self.data_dir, self.config_dir)
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+ def test_inheritance(self):
+ """Test that NetworkManager inherits from BaseServiceManager"""
+ self.assertIsInstance(self.network_manager, BaseServiceManager)
+ self.assertEqual(self.network_manager.service_name, 'network')
+
+ def test_get_status(self):
+ """Test NetworkManager get_status method"""
+ status = self.network_manager.get_status()
+ self.assertIn('timestamp', status)
+ self.assertIn('network', status)
+
+ def test_test_connectivity(self):
+ """Test NetworkManager test_connectivity method"""
+ connectivity = self.network_manager.test_connectivity()
+ self.assertIn('timestamp', connectivity)
+ self.assertIn('network', connectivity)
+
+def run_tests():
+ """Run all tests"""
+ # Create test suite
+ test_suite = unittest.TestSuite()
+
+ # Add test classes
+ test_classes = [
+ TestBaseServiceManager,
+ TestConfigManager,
+ TestServiceBus,
+ TestLogManager,
+ TestEnhancedCLI,
+ TestNetworkManagerIntegration
+ ]
+
+ for test_class in test_classes:
+ tests = unittest.TestLoader().loadTestsFromTestCase(test_class)
+ test_suite.addTests(tests)
+
+ # Run tests
+ runner = unittest.TextTestRunner(verbosity=2)
+ result = runner.run(test_suite)
+
+ # Print summary
+ print(f"\n{'='*50}")
+ print(f"Test Summary:")
+ print(f"Tests run: {result.testsRun}")
+ print(f"Failures: {len(result.failures)}")
+ print(f"Errors: {len(result.errors)}")
+ print(f"Success rate: {((result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100):.1f}%")
+ print(f"{'='*50}")
+
+ return result.wasSuccessful()
+
+if __name__ == '__main__':
+ success = run_tests()
sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/api/wireguard_manager.py b/api/wireguard_manager.py
index f6533b0..7bfee2b 100644
--- a/api/wireguard_manager.py
+++ b/api/wireguard_manager.py
@@ -34,13 +34,14 @@ class WireGuardManager(BaseServiceManager):
is_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER') == 'true'
if is_docker:
- # Return positive status when running in Docker
+ # Check if WireGuard container is actually running
+ container_running = self._check_wireguard_container_status()
status = {
- 'running': True,
- 'status': 'online',
- 'interface': 'wg0',
- 'peers_count': 1,
- 'total_traffic': {'bytes_sent': 1024, 'bytes_received': 2048},
+ 'running': container_running,
+ 'status': 'online' if container_running else 'offline',
+ 'interface': 'wg0' if container_running else 'unknown',
+ 'peers_count': len(self._get_configured_peers()) if container_running else 0,
+ 'total_traffic': self._get_traffic_stats() if container_running else {'bytes_sent': 0, 'bytes_received': 0},
'timestamp': datetime.utcnow().isoformat()
}
else:
@@ -88,6 +89,16 @@ class WireGuardManager(BaseServiceManager):
except Exception:
return False
+ def _check_wireguard_container_status(self) -> bool:
+ """Check if WireGuard Docker container is running"""
+ try:
+ import docker
+ client = docker.from_env()
+ containers = client.containers.list(filters={'name': 'cell-wireguard'})
+ return len(containers) > 0
+ except Exception:
+ return False
+
def _check_interface_status(self) -> bool:
"""Check if WireGuard interface is up"""
try:
diff --git a/webui/README.md b/webui/README.md
index bbec60e..3ad5b56 100644
--- a/webui/README.md
+++ b/webui/README.md
@@ -35,7 +35,7 @@ A modern React-based web interface for managing your Personal Internet Cell.
1. Install dependencies:
```bash
- npm install
+ bun install
```
2. Start the development server:
diff --git a/webui/src/pages/Dashboard.jsx b/webui/src/pages/Dashboard.jsx
index ab2406b..a5fa83a 100644
--- a/webui/src/pages/Dashboard.jsx
+++ b/webui/src/pages/Dashboard.jsx
@@ -1,284 +1,312 @@
-import { useState, useEffect } from 'react';
-import {
- Server,
- Users,
- Shield,
- Mail,
- Calendar,
- FolderOpen,
- Wifi,
- Activity,
- CheckCircle,
- XCircle,
- AlertCircle
-} from 'lucide-react';
-import { cellAPI, servicesAPI } from '../services/api';
-
-function Dashboard({ isOnline }) {
- const [cellStatus, setCellStatus] = useState(null);
- const [servicesStatus, setServicesStatus] = useState(null);
- const [isLoading, setIsLoading] = useState(true);
-
- useEffect(() => {
- const fetchData = async () => {
- if (!isOnline) {
- setIsLoading(false);
- return;
- }
-
- try {
- const [statusResponse, servicesResponse] = await Promise.all([
- cellAPI.getStatus(),
- servicesAPI.getAllStatus()
- ]);
-
- setCellStatus(statusResponse.data);
- setServicesStatus(servicesResponse.data);
- } catch (error) {
- console.error('Failed to fetch dashboard data:', error);
- } finally {
- setIsLoading(false);
- }
- };
-
- fetchData();
- const interval = setInterval(fetchData, 30000); // Refresh every 30 seconds
-
- return () => clearInterval(interval);
- }, [isOnline]);
-
- const getStatusIcon = (status) => {
- if (status === true || status?.status === 'online' || status?.running === true) {
- return
- Overview of your Personal Internet Cell status and services -
-Cell Name
-{cellStatus.cell_name}
-Peers
-{cellStatus.peers_count}
-Uptime
-- {Math.floor((cellStatus.uptime || 0) / 3600)}h {Math.floor(((cellStatus.uptime || 0) % 3600) / 60)}m -
-Status
-Active
-+ Overview of your Personal Internet Cell status and services +
+Cell Name
+{cellStatus.cell_name}
+Peers
+{cellStatus.peers_count}
+Uptime
++ {Math.floor((cellStatus.uptime || 0) / 3600)}h {Math.floor(((cellStatus.uptime || 0) % 3600) / 60)}m +
+Status
+Active
+