AUTARCH v1.9 — remote monitoring, SSH manager, daemon, vault, cleanup

- Add Remote Monitoring Station with PIAP device profile system
- Add SSH/SSHD manager with fail2ban integration
- Add privileged daemon architecture for safe root operations
- Add encrypted vault, HAL memory, HAL auto-analyst
- Add network security suite, module creator, codex training
- Add start.sh launcher script and GTK3 desktop launcher
- Remove Output/ build artifacts, installer files, loose docs
- Update .gitignore for runtime data and build artifacts
- Update README for v1.9 with new launch method, screenshots, and features

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
SsSnake
2026-03-24 06:59:06 -07:00
parent 1092689f45
commit da53899f66
382 changed files with 15277 additions and 493964 deletions

View File

@@ -46,37 +46,32 @@ class AgentResult:
class Agent:
"""Autonomous agent that uses LLM and tools to accomplish tasks."""
SYSTEM_PROMPT = """You are AUTARCH, an autonomous AI agent created by darkHal and Setec Security Labs.
SYSTEM_PROMPT = """You are Hal, the autonomous agent for AUTARCH (darkHal Security Group).
Your purpose is to accomplish tasks using the tools available to you. You think step by step, use tools to gather information and take actions, then continue until the task is complete.
RULES:
- No markdown. No ASCII art. Plain text only.
- Detect the OS before running commands. Run "uname -s" first if unsure.
- Only run commands for the detected OS. Never list commands for multiple platforms.
- On Linux: detect the distro first (cat /etc/os-release). Use apt for Debian/Ubuntu, dnf for Fedora, etc.
- Never prefix commands with sudo. The system handles root access automatically via the daemon.
- Run ONE command at a time. Check the result before continuing.
- Keep responses short. No filler.
## How to respond
FORMAT — you MUST use this exact format for every response:
You MUST respond in the following format for EVERY response:
THOUGHT: your reasoning
ACTION: tool_name
PARAMS: {{"param": "value"}}
THOUGHT: [Your reasoning about what to do next]
ACTION: [tool_name]
PARAMS: {"param1": "value1", "param2": "value2"}
OR when the task is complete:
THOUGHT: [Summary of what was accomplished]
When done:
THOUGHT: summary
ACTION: task_complete
PARAMS: {"summary": "Description of completed work"}
PARAMS: {{"summary": "what was done"}}
OR when you need user input:
THOUGHT: [Why you need to ask the user]
When you need input:
THOUGHT: why you need to ask
ACTION: ask_user
PARAMS: {"question": "Your question"}
## Rules
1. Always start with THOUGHT to explain your reasoning
2. Always specify exactly one ACTION
3. Always provide PARAMS as valid JSON (even if empty: {})
4. Use tools to verify your work - don't assume success
5. If a tool fails, analyze the error and try a different approach
6. Only use task_complete when the task is fully done
PARAMS: {{"question": "your question"}}
{tools_description}
"""

View File

@@ -82,6 +82,32 @@ class Config:
'secret_key': '',
'mcp_port': '8081',
},
'mcp': {
'enabled': 'false',
'auto_start': 'false',
'transport': 'sse',
'host': '0.0.0.0',
'port': '8081',
'log_level': 'INFO',
'instructions': 'AUTARCH security framework tools',
'auth_enabled': 'false',
'auth_token': '',
'rate_limit': '',
'mask_errors': 'false',
'request_timeout': '120',
'max_message_size': '10485760',
'cors_origins': '*',
'ssl_enabled': 'false',
'ssl_cert': '',
'ssl_key': '',
'disabled_tools': '',
'nmap_timeout': '120',
'tcpdump_timeout': '30',
'whois_timeout': '15',
'dns_timeout': '10',
'geoip_timeout': '10',
'geoip_endpoint': 'http://ip-api.com/json/',
},
'revshell': {
'enabled': 'true',
'host': '0.0.0.0',
@@ -112,6 +138,23 @@ class Config:
'n_gpu_layers': '-1',
'n_threads': '4',
},
'agents': {
'backend': 'local',
'local_max_steps': '20',
'local_verbose': 'true',
'claude_enabled': 'false',
'claude_model': 'claude-sonnet-4-6',
'claude_max_tokens': '16384',
'claude_max_steps': '30',
'openai_enabled': 'false',
'openai_model': 'gpt-4o',
'openai_base_url': 'https://api.openai.com/v1',
'openai_max_tokens': '16384',
'openai_max_steps': '30',
},
'hal_memory': {
'max_bytes': str(4 * 1024 * 1024 * 1024), # 4GB default
},
'autonomy': {
'enabled': 'false',
'monitor_interval': '3',
@@ -267,6 +310,42 @@ class Config:
'save_raw_output': self.get_bool('pentest', 'save_raw_output', True),
}
def _get_secret(self, key: str, fallback_section: str = '', fallback_key: str = 'api_key') -> str:
    """Look up a secret, preferring the encrypted vault over plaintext config.

    Supports gradual migration: a value stored in the vault wins, but legacy
    plaintext config entries keep working when the vault has no entry (or is
    unavailable). Returns '' when neither source has a value.
    """
    # Vault first — any failure (missing module, locked vault) is non-fatal.
    try:
        from core.vault import get_vault
        stored = get_vault().get(key, '')
        if stored:
            return stored
    except Exception:
        pass
    # Legacy plaintext fallback, only when a config location was given.
    return self.get(fallback_section, fallback_key, '') if fallback_section else ''
def _set_secret(self, key: str, value: str, config_section: str = '', config_key: str = 'api_key'):
    """Store a secret in the vault and wipe the plaintext config copy.

    Empty values are ignored. When the vault is unavailable the secret is
    written to the plaintext config instead, preserving legacy behaviour.
    """
    if not value:
        return
    try:
        from core.vault import get_vault
        get_vault().set(key, value)
        if config_section:
            # Vault write succeeded: remove the plaintext copy for security.
            self.set(config_section, config_key, '')
    except Exception:
        # Vault not available — keep storing in config as before.
        if config_section:
            self.set(config_section, config_key, value)
def get_claude_settings(self) -> dict:
"""Get all Claude API settings as a dictionary.
@@ -274,7 +353,7 @@ class Config:
Dictionary with Claude API settings properly typed
"""
return {
'api_key': self.get('claude', 'api_key', ''),
'api_key': self._get_secret('claude_api_key', 'claude', 'api_key'),
'model': self.get('claude', 'model', 'claude-sonnet-4-20250514'),
'max_tokens': self.get_int('claude', 'max_tokens', 4096),
'temperature': self.get_float('claude', 'temperature', 0.7),
@@ -305,7 +384,7 @@ class Config:
def get_huggingface_settings(self) -> dict:
"""Get all HuggingFace Inference API settings as a dictionary."""
return {
'api_key': self.get('huggingface', 'api_key', ''),
'api_key': self._get_secret('huggingface_api_key', 'huggingface', 'api_key'),
'model': self.get('huggingface', 'model', 'mistralai/Mistral-7B-Instruct-v0.3'),
'endpoint': self.get('huggingface', 'endpoint', ''),
'provider': self.get('huggingface', 'provider', 'auto'),
@@ -322,7 +401,7 @@ class Config:
def get_openai_settings(self) -> dict:
"""Get all OpenAI API settings as a dictionary."""
return {
'api_key': self.get('openai', 'api_key', ''),
'api_key': self._get_secret('openai_api_key', 'openai', 'api_key'),
'base_url': self.get('openai', 'base_url', 'https://api.openai.com/v1'),
'model': self.get('openai', 'model', 'gpt-4o'),
'max_tokens': self.get_int('openai', 'max_tokens', 4096),
@@ -355,6 +434,35 @@ class Config:
'mappings': self.get('upnp', 'mappings', ''),
}
def get_mcp_settings(self) -> dict:
    """Get all MCP server settings as a dictionary.

    NOTE(review): a second, functionally identical get_mcp_settings is
    defined later in this class; in Python the later definition wins, so
    this copy is shadowed dead code — confirm and delete one of the two.
    """
    return {
        'enabled': self.get_bool('mcp', 'enabled', False),
        'auto_start': self.get_bool('mcp', 'auto_start', False),
        'transport': self.get('mcp', 'transport', 'sse'),
        'host': self.get('mcp', 'host', '0.0.0.0'),
        'port': self.get_int('mcp', 'port', 8081),
        'log_level': self.get('mcp', 'log_level', 'INFO'),
        'instructions': self.get('mcp', 'instructions', 'AUTARCH security framework tools'),
        'auth_enabled': self.get_bool('mcp', 'auth_enabled', False),
        'auth_token': self.get('mcp', 'auth_token', ''),
        'rate_limit': self.get('mcp', 'rate_limit', ''),
        'mask_errors': self.get_bool('mcp', 'mask_errors', False),
        'request_timeout': self.get_int('mcp', 'request_timeout', 120),
        'max_message_size': self.get_int('mcp', 'max_message_size', 10485760),  # 10 MiB default
        'cors_origins': self.get('mcp', 'cors_origins', '*'),
        'ssl_enabled': self.get_bool('mcp', 'ssl_enabled', False),
        'ssl_cert': self.get('mcp', 'ssl_cert', ''),
        'ssl_key': self.get('mcp', 'ssl_key', ''),
        'disabled_tools': self.get('mcp', 'disabled_tools', ''),
        'nmap_timeout': self.get_int('mcp', 'nmap_timeout', 120),
        'tcpdump_timeout': self.get_int('mcp', 'tcpdump_timeout', 30),
        'whois_timeout': self.get_int('mcp', 'whois_timeout', 15),
        'dns_timeout': self.get_int('mcp', 'dns_timeout', 10),
        'geoip_timeout': self.get_int('mcp', 'geoip_timeout', 10),
        'geoip_endpoint': self.get('mcp', 'geoip_endpoint', 'http://ip-api.com/json/'),
    }
def get_revshell_settings(self) -> dict:
"""Get all reverse shell settings as a dictionary."""
return {
@@ -364,6 +472,35 @@ class Config:
'auto_start': self.get_bool('revshell', 'auto_start', False),
}
def get_mcp_settings(self) -> dict:
    """Get MCP server settings.

    NOTE(review): this duplicates an identical get_mcp_settings defined
    earlier in this class. This later definition is the one Python keeps;
    one of the two copies should be removed.
    """
    return {
        'enabled': self.get_bool('mcp', 'enabled', False),
        'auto_start': self.get_bool('mcp', 'auto_start', False),
        'transport': self.get('mcp', 'transport', 'sse'),
        'host': self.get('mcp', 'host', '0.0.0.0'),
        'port': self.get_int('mcp', 'port', 8081),
        'log_level': self.get('mcp', 'log_level', 'INFO'),
        'instructions': self.get('mcp', 'instructions', 'AUTARCH security framework tools'),
        'auth_enabled': self.get_bool('mcp', 'auth_enabled', False),
        'auth_token': self.get('mcp', 'auth_token', ''),
        'rate_limit': self.get('mcp', 'rate_limit', ''),
        'mask_errors': self.get_bool('mcp', 'mask_errors', False),
        'request_timeout': self.get_int('mcp', 'request_timeout', 120),
        'max_message_size': self.get_int('mcp', 'max_message_size', 10485760),  # 10 MiB default
        'cors_origins': self.get('mcp', 'cors_origins', '*'),
        'ssl_enabled': self.get_bool('mcp', 'ssl_enabled', False),
        'ssl_cert': self.get('mcp', 'ssl_cert', ''),
        'ssl_key': self.get('mcp', 'ssl_key', ''),
        'disabled_tools': self.get('mcp', 'disabled_tools', ''),
        'nmap_timeout': self.get_int('mcp', 'nmap_timeout', 120),
        'tcpdump_timeout': self.get_int('mcp', 'tcpdump_timeout', 30),
        'whois_timeout': self.get_int('mcp', 'whois_timeout', 15),
        'dns_timeout': self.get_int('mcp', 'dns_timeout', 10),
        'geoip_timeout': self.get_int('mcp', 'geoip_timeout', 10),
        'geoip_endpoint': self.get('mcp', 'geoip_endpoint', 'http://ip-api.com/json/'),
    }
def get_tier_settings(self, tier: str) -> dict:
"""Get settings for a model tier (slm, sam, lam)."""
return {
@@ -398,6 +535,23 @@ class Config:
'log_max_entries': self.get_int('autonomy', 'log_max_entries', 1000),
}
def get_agents_settings(self) -> dict:
    """Return agent configuration (backend selector plus per-backend limits,
    models, and token budgets for local / claude / openai) as a plain dict."""
    section = 'agents'
    return {
        'backend': self.get(section, 'backend', 'local'),
        'local_max_steps': self.get_int(section, 'local_max_steps', 20),
        'local_verbose': self.get_bool(section, 'local_verbose', True),
        'claude_enabled': self.get_bool(section, 'claude_enabled', False),
        'claude_model': self.get(section, 'claude_model', 'claude-sonnet-4-6'),
        'claude_max_tokens': self.get_int(section, 'claude_max_tokens', 16384),
        'claude_max_steps': self.get_int(section, 'claude_max_steps', 30),
        'openai_enabled': self.get_bool(section, 'openai_enabled', False),
        'openai_model': self.get(section, 'openai_model', 'gpt-4o'),
        'openai_base_url': self.get(section, 'openai_base_url', 'https://api.openai.com/v1'),
        'openai_max_tokens': self.get_int(section, 'openai_max_tokens', 16384),
        'openai_max_steps': self.get_int(section, 'openai_max_steps', 30),
    }
@staticmethod
def get_templates_dir() -> Path:
"""Get the path to the configuration templates directory."""

708
core/daemon.py Normal file
View File

@@ -0,0 +1,708 @@
#!/usr/bin/env python3
"""
AUTARCH Privileged Daemon
Runs as root, accepts commands from the unprivileged AUTARCH web process
over a Unix domain socket.
This allows Flask to run as a normal user while still executing privileged
operations (iptables, sysctl, iwlist scanning, systemctl, ARP manipulation, etc.)
Start: sudo python3 core/daemon.py
Socket: /var/run/autarch-daemon.sock
Protocol: newline-delimited JSON over Unix socket
Request: {"cmd": ["iptables", "-A", "INPUT", ...], "timeout": 15}
Response: {"ok": true, "stdout": "...", "stderr": "...", "code": 0}
"""
import hashlib
import hmac
import json
import logging
import os
import secrets
import signal
import socket
import subprocess
import sys
import threading
import time
from pathlib import Path
SOCKET_PATH = '/var/run/autarch-daemon.sock'
PID_FILE = '/var/run/autarch-daemon.pid'
LOG_FILE = '/var/log/autarch-daemon.log'
SECRET_FILE = '/var/run/autarch-daemon.secret'
MAX_MSG_SIZE = 1024 * 1024 # 1MB
NONCE_EXPIRY = 30 # Nonces valid for 30 seconds
# ── HMAC Authentication ───────────────────────────────────────────────────────
# The daemon generates a shared secret on startup and writes it to SECRET_FILE.
# The client reads the secret and signs every request with HMAC-SHA256.
# This prevents other users/processes from injecting commands.
_daemon_secret = b''
_used_nonces = set() # Replay protection
def _generate_daemon_secret() -> bytes:
    """Create a fresh 32-byte shared secret and persist it to SECRET_FILE.

    The file ends up mode 0o640 (root rw, group r) and — when the chown
    succeeds — group-owned by the group owning the AUTARCH checkout, so the
    unprivileged web process can read it while other users cannot.
    """
    secret = secrets.token_bytes(32)
    with open(SECRET_FILE, 'wb') as fh:
        fh.write(secret)
    try:
        # Best effort: hand group ownership to the install's group; root keeps owner.
        project_root = Path(__file__).parent.parent
        os.chown(SECRET_FILE, 0, project_root.stat().st_gid)
    except Exception:
        pass  # the chmod below still restricts access
    os.chmod(SECRET_FILE, 0o640)  # root can read/write, group can read
    return secret
def _load_daemon_secret() -> bytes:
    """Client-side read of the daemon's shared secret.

    Returns b'' when the secret file is missing or unreadable (e.g. the
    daemon has not been started, or this process lacks group access).
    """
    try:
        return Path(SECRET_FILE).read_bytes()
    except OSError:  # PermissionError is an OSError subclass
        return b''
def _sign_request(payload_bytes: bytes, secret: bytes) -> str:
"""Create HMAC-SHA256 signature for a request."""
return hmac.new(secret, payload_bytes, hashlib.sha256).hexdigest()
def _verify_request(payload_bytes: bytes, signature: str, secret: bytes) -> bool:
"""Verify HMAC-SHA256 signature."""
expected = hmac.new(secret, payload_bytes, hashlib.sha256).hexdigest()
return hmac.compare_digest(expected, signature)
# No allowlist — any command can run EXCEPT those in the blocklist below.
# The daemon runs as root and is protected by HMAC auth + SO_PEERCRED,
# so only AUTARCH can talk to it. The blocklist catches destructive commands.

# Commands that are NEVER allowed. Matching is by substring against the
# space-joined argv (see is_command_allowed), so entries also catch variants
# such as 'systemctl reboot now' or 'rm -rf /etc/ssh'.
BLOCKED_COMMANDS = {
    # ── Bricks the system (irreversible) ──
    'rm -rf /',
    'rm -rf /*',
    'rm -rf /home',
    'rm -rf /etc',
    'rm -rf /var',
    'rm -rf /usr',
    'rm -rf /boot',
    'mkfs /dev/sd',
    'mkfs /dev/nvme',
    'mkfs /dev/mmc',
    'dd if=/dev/zero of=/dev/sd',
    'dd if=/dev/zero of=/dev/nvme',
    'dd if=/dev/zero of=/dev/mmc',
    'dd if=/dev/random of=/dev/sd',
    'shred /dev/sd',
    'shred /dev/nvme',
    'shred /dev/mmc',
    'wipefs /dev/sd',
    'wipefs /dev/nvme',
    # ── Fork bombs ──
    ':(){',
    ':()',
    # ── Reboot / shutdown (human decision only) ──
    'reboot',
    'shutdown',
    'poweroff',
    'halt',
    'init 0',
    'init 6',
    'systemctl reboot',
    'systemctl poweroff',
    'systemctl halt',
    # ── Bootloader (unrecoverable if wrong) ──
    'update-grub',
    'grub-install',
    # ── Root account destruction ──
    'passwd root',
    'userdel root',
    'deluser root',
    'usermod -L root',
    # ── Loopback kill (breaks everything including the daemon) ──
    'ip link set lo down',
    'ifconfig lo down',
    # ── Partition table (destroys disk layout) ──
    'fdisk /dev/sd',
    'fdisk /dev/nvme',
    'fdisk /dev/mmc',
    'parted /dev/sd',
    'parted /dev/nvme',
    'cfdisk',
    'sfdisk',
}

_log = logging.getLogger('autarch.daemon')


def is_command_allowed(cmd_parts: list) -> tuple:
    """Check a command against the destructive-command blocklist.

    Args:
        cmd_parts: Command as a list of argv strings.

    Returns:
        (allowed: bool, reason: str) — allowed is False for empty commands
        and for any command whose space-joined form contains a blocklist
        entry as a substring.

    NOTE: substring matching is deliberately coarse; it can reject benign
    commands that merely mention a blocked phrase (e.g. `echo reboot`).
    That trade-off favours safety for a root-privileged executor.
    """
    if not cmd_parts:
        return False, 'Empty command'
    # Drop a leading 'sudo' (path-qualified or not) — we're already root.
    if os.path.basename(cmd_parts[0]) == 'sudo' and len(cmd_parts) > 1:
        cmd_parts = cmd_parts[1:]
    full_cmd = ' '.join(cmd_parts)
    for blocked in BLOCKED_COMMANDS:
        if blocked in full_cmd:
            return False, f'Blocked: {blocked}'
    return True, 'OK'


def execute_command(cmd_parts: list, timeout: int = 30, stdin_data: str = None) -> dict:
    """Execute a blocklist-checked command and return the result.

    Args:
        cmd_parts: Command as a list of argv strings.
        timeout: Maximum execution time in seconds.
        stdin_data: Optional text piped to the command's stdin.

    Returns:
        dict with keys ok, stdout, stderr, code. Negative codes are
        daemon-internal: -1 blocked, -2 timeout, -3 not found, -4 other.
    """
    # Strip sudo prefix — we're already root
    if cmd_parts and cmd_parts[0] == 'sudo':
        cmd_parts = cmd_parts[1:]
    allowed, reason = is_command_allowed(cmd_parts)
    if not allowed:
        return {'ok': False, 'stdout': '', 'stderr': reason, 'code': -1}
    try:
        # BUG FIX: the previous version passed BOTH stdin=subprocess.PIPE and
        # input=stdin_data, which makes subprocess.run() raise ValueError —
        # so every command that needed stdin failed with code -4.
        # `input` alone already implies a stdin PIPE.
        result = subprocess.run(
            cmd_parts,
            capture_output=True,
            text=True,
            timeout=timeout,
            input=stdin_data,
        )
        return {
            'ok': result.returncode == 0,
            'stdout': result.stdout,
            'stderr': result.stderr,
            'code': result.returncode,
        }
    except subprocess.TimeoutExpired:
        return {'ok': False, 'stdout': '', 'stderr': f'Timeout after {timeout}s', 'code': -2}
    except FileNotFoundError:
        return {'ok': False, 'stdout': '', 'stderr': f'Command not found: {cmd_parts[0]}', 'code': -3}
    except Exception as e:
        return {'ok': False, 'stdout': '', 'stderr': str(e), 'code': -4}
def _verify_peer(conn: socket.socket) -> tuple:
    """Verify the connecting process is owned by the AUTARCH user.
    Uses SO_PEERCRED on Linux to get the peer's UID/PID.
    Returns (allowed: bool, info: str)."""
    try:
        import struct
        # SO_PEERCRED returns (pid, uid, gid) as 3 unsigned ints
        # NOTE(review): '3i' unpacks them as *signed* ints — fine for normal
        # pid/uid/gid ranges, but confirm against very large UIDs.
        cred = conn.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED, struct.calcsize('3i'))
        pid, uid, gid = struct.unpack('3i', cred)
        # Allow: root (uid 0), or the user who owns the autarch directory
        # (the checkout's owner/group stand in for "the AUTARCH user").
        autarch_dir = Path(__file__).parent.parent
        owner_uid = autarch_dir.stat().st_uid
        owner_gid = autarch_dir.stat().st_gid
        if uid == 0 or uid == owner_uid or gid == owner_gid:
            return True, f'pid={pid} uid={uid} gid={gid}'
        else:
            return False, f'Rejected: pid={pid} uid={uid} gid={gid} (expected uid={owner_uid} or gid={owner_gid})'
    except (AttributeError, OSError):
        # SO_PEERCRED not available (non-Linux) — fall back to HMAC-only auth
        return True, 'peercred not available'
def _builtin_capture(request: dict) -> dict:
    """Run scapy packet capture as root. Called by the daemon directly.

    Request keys read here: interface, filter (BPF), duration (seconds,
    capped at 300), max_packets, file (output pcap path, defaults to a
    /tmp/autarch_capture_<pid>.pcap).

    Returns {'ok': True, 'packet_count', 'file', 'duration'} on success or
    {'ok': False, 'error': ...} when scapy is missing or sniffing fails.
    """
    try:
        from scapy.all import sniff, wrpcap
    except ImportError:
        return {'ok': False, 'error': 'scapy not available'}
    interface = request.get('interface', '')
    bpf_filter = request.get('filter', '')
    # Cap duration at 5 minutes so one request can't pin the daemon.
    duration = min(int(request.get('duration', 30)), 300)
    max_packets = int(request.get('max_packets', 1000))
    output_file = request.get('file', '')
    if not output_file:
        output_file = f'/tmp/autarch_capture_{os.getpid()}.pcap'
    _log.info(f'[Capture] Starting: iface={interface or "any"} duration={duration}s filter={bpf_filter or "none"} file={output_file}')
    try:
        # sniff() stops at whichever of timeout/count is reached first.
        kwargs = {'timeout': duration, 'count': max_packets, 'store': True}
        if interface:
            kwargs['iface'] = interface
        if bpf_filter:
            kwargs['filter'] = bpf_filter
        packets = sniff(**kwargs)
        count = len(packets)
        if packets and output_file:
            wrpcap(output_file, packets)
            os.chmod(output_file, 0o644)  # Make readable by non-root
        _log.info(f'[Capture] Done: {count} packets captured')
        return {
            'ok': True,
            'packet_count': count,
            'file': output_file if count > 0 else '',
            'duration': duration,
        }
    except Exception as e:
        _log.error(f'[Capture] Failed: {e}')
        return {'ok': False, 'error': str(e)}
def _builtin_wifi_scan() -> dict:
    """Run WiFi scan as root using iw or nmcli.

    NOTE(review): despite the docstring, only `iw` is attempted in this
    body — there is no nmcli fallback here; confirm intent.

    Returns {'ok': True, 'networks': [...], 'interface': iface} where each
    network dict carries bssid/ssid/channel/signal/security fields, or
    {'ok': False, 'error': ...} on any failure.
    """
    networks = []
    try:
        # Find wireless interface via sysfs: first net device with a
        # 'wireless' subdirectory wins.
        iface = None
        for name in os.listdir('/sys/class/net/'):
            if os.path.isdir(f'/sys/class/net/{name}/wireless'):
                iface = name
                break
        if not iface:
            return {'ok': False, 'error': 'No wireless interface'}
        # Try iw scan (needs root)
        r = subprocess.run(['iw', 'dev', iface, 'scan'], capture_output=True, text=True, timeout=20)
        if r.returncode == 0:
            import re
            current = {}
            # Stateful line parser: each 'BSS <mac>' line starts a new
            # record; following lines fill in the current record's fields.
            for line in r.stdout.split('\n'):
                line = line.strip()
                if line.startswith('BSS '):
                    if current.get('bssid'):
                        networks.append(current)  # flush previous record
                    m = re.match(r'BSS ([\da-f:]+)', line)
                    current = {'bssid': m.group(1) if m else '', 'ssid': '', 'channel': '', 'signal': '', 'security': ''}
                elif line.startswith('SSID:'):
                    current['ssid'] = line.split(':', 1)[1].strip() or '(Hidden)'
                elif 'primary channel:' in line.lower():
                    m = re.search(r'(\d+)', line)
                    if m:
                        current['channel'] = m.group(1)
                elif 'signal:' in line.lower():
                    m = re.search(r'(-?\d+)', line)
                    if m:
                        current['signal'] = m.group(1)
                elif 'RSN' in line:
                    current['security'] = 'WPA2'  # RSN information element => WPA2
                elif 'WPA' in line and current.get('security') != 'WPA2':
                    current['security'] = 'WPA'  # don't downgrade an RSN hit
            if current.get('bssid'):
                networks.append(current)  # flush the final record
            return {'ok': True, 'networks': networks, 'interface': iface}
    except Exception as e:
        return {'ok': False, 'error': str(e)}
    # Reached when `iw` exited non-zero.
    return {'ok': False, 'error': 'WiFi scan failed'}
def handle_client(conn: socket.socket, addr):
    """Handle a single client connection.

    Reads one newline-terminated JSON request, authenticates it (HMAC
    signature + timestamped nonce when a shared secret is configured),
    dispatches built-in actions or executes the command through the
    blocklist, then writes one JSON response line and closes.
    """
    # Verify the peer is an authorized process (SO_PEERCRED check).
    allowed, peer_info = _verify_peer(conn)
    if not allowed:
        _log.warning(f'Connection rejected: {peer_info}')
        try:
            conn.sendall(json.dumps({'ok': False, 'stderr': 'Unauthorized process'}).encode() + b'\n')
        except Exception:
            pass
        conn.close()
        return
    try:
        data = b''
        while True:
            chunk = conn.recv(4096)
            if not chunk:
                break
            data += chunk
            # BUG FIX: enforce the size cap *inside* the read loop. The old
            # code only checked after the loop exited, so a client that never
            # sent a newline could grow `data` without bound (memory DoS).
            if len(data) > MAX_MSG_SIZE:
                conn.sendall(json.dumps({'ok': False, 'stderr': 'Message too large'}).encode() + b'\n')
                return
            if b'\n' in data:
                break
        if not data:
            return
        # Parse request — format: {"payload": {...}, "sig": "hmac-hex", "nonce": "..."}
        try:
            envelope = json.loads(data.decode('utf-8').strip())
        except json.JSONDecodeError as e:
            conn.sendall(json.dumps({'ok': False, 'stderr': f'Invalid JSON: {e}'}).encode() + b'\n')
            return
        # ── HMAC Verification ──
        if _daemon_secret:
            sig = envelope.get('sig', '')
            nonce = envelope.get('nonce', '')
            payload_str = envelope.get('payload', '')
            if not sig or not nonce or not payload_str:
                _log.warning('Rejected: missing sig/nonce/payload')
                conn.sendall(json.dumps({'ok': False, 'stderr': 'Authentication required'}).encode() + b'\n')
                return
            # Verify signature over the raw payload string
            payload_bytes = payload_str.encode() if isinstance(payload_str, str) else payload_str
            if not _verify_request(payload_bytes, sig, _daemon_secret):
                _log.warning('Rejected: invalid HMAC signature')
                conn.sendall(json.dumps({'ok': False, 'stderr': 'Invalid signature'}).encode() + b'\n')
                return
            # Replay protection — nonce format is "timestamp:random"; reject
            # stale timestamps and previously-seen nonces.
            try:
                nonce_time = float(nonce.split(':')[0])
                if abs(time.time() - nonce_time) > NONCE_EXPIRY:
                    conn.sendall(json.dumps({'ok': False, 'stderr': 'Nonce expired'}).encode() + b'\n')
                    return
            except (ValueError, IndexError):
                conn.sendall(json.dumps({'ok': False, 'stderr': 'Invalid nonce'}).encode() + b'\n')
                return
            if nonce in _used_nonces:
                conn.sendall(json.dumps({'ok': False, 'stderr': 'Nonce reused (replay detected)'}).encode() + b'\n')
                return
            _used_nonces.add(nonce)
            # Crude prune — expired nonces are rejected by timestamp anyway,
            # so clearing only risks accepting a replay within NONCE_EXPIRY.
            if len(_used_nonces) > 10000:
                _used_nonces.clear()
            request = json.loads(payload_str)
        else:
            # No secret configured — accept unsigned (backwards compat during setup)
            request = envelope
        cmd = request.get('cmd')
        timeout = min(request.get('timeout', 30), 300)  # Cap at 5 minutes
        stdin_data = request.get('stdin')
        if not cmd:
            conn.sendall(json.dumps({'ok': False, 'stderr': 'No cmd provided'}).encode() + b'\n')
            return
        # Handle string commands (split them shell-style into argv)
        if isinstance(cmd, str):
            import shlex
            cmd = shlex.split(cmd)
        # ── Built-in actions (run Python directly as root, no shell) ──
        if cmd and cmd[0] == '__capture__':
            conn.sendall(json.dumps(_builtin_capture(request)).encode() + b'\n')
            return
        if cmd and cmd[0] == '__wifi_scan__':
            conn.sendall(json.dumps(_builtin_wifi_scan()).encode() + b'\n')
            return
        _log.info(f'Executing: {" ".join(cmd[:6])}{"..." if len(cmd) > 6 else ""}')
        # Execute through the blocklist and return the structured result
        result = execute_command(cmd, timeout=timeout, stdin_data=stdin_data)
        conn.sendall(json.dumps(result).encode() + b'\n')
    except BrokenPipeError:
        pass  # client disconnected mid-response
    except Exception as e:
        _log.error(f'Client handler error: {e}', exc_info=True)
        try:
            conn.sendall(json.dumps({'ok': False, 'stderr': str(e)}).encode() + b'\n')
        except Exception:
            pass
    finally:
        conn.close()
def run_daemon():
    """Run the privileged daemon (blocking).

    Requires root. Generates the HMAC shared secret, binds the Unix socket
    with group-restricted permissions, writes a PID file, installs
    SIGTERM/SIGINT cleanup handlers, and serves each connection on a daemon
    thread until shut down.
    """
    global _daemon_secret
    # Must be root — every request executes with full privileges.
    if os.geteuid() != 0:
        print('ERROR: autarch-daemon must run as root', file=sys.stderr)
        sys.exit(1)
    # Generate shared secret for HMAC authentication
    _daemon_secret = _generate_daemon_secret()
    # Setup logging to both the log file and stderr
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        handlers=[
            logging.FileHandler(LOG_FILE),
            logging.StreamHandler(),
        ]
    )
    # Remove stale socket from a previous run
    if os.path.exists(SOCKET_PATH):
        os.unlink(SOCKET_PATH)
    # Create socket
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server.bind(SOCKET_PATH)
    # Allow the autarch user (and group) to connect
    os.chmod(SOCKET_PATH, 0o770)
    try:
        # Group-own the socket as root:<autarch group> so the unprivileged
        # web process can connect. (Cleanup: removed an unused `import pwd`
        # and an unused owner_uid lookup from the previous version.)
        autarch_dir = Path(__file__).parent.parent
        os.chown(SOCKET_PATH, 0, autarch_dir.stat().st_gid)
    except Exception:
        # Fallback: world-accessible (less secure but works)
        os.chmod(SOCKET_PATH, 0o777)
    server.listen(10)
    # Write PID file so service managers / stop scripts can find us
    with open(PID_FILE, 'w') as f:
        f.write(str(os.getpid()))
    def shutdown(signum, frame):
        # Remove socket/PID/secret files so the next start is clean.
        _log.info('Shutting down...')
        server.close()
        for path in (SOCKET_PATH, PID_FILE, SECRET_FILE):
            if os.path.exists(path):
                os.unlink(path)
        sys.exit(0)
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    _log.info(f'AUTARCH daemon started on {SOCKET_PATH} (PID {os.getpid()})')
    _log.info(f'Blocked commands: {len(BLOCKED_COMMANDS)}')
    while True:
        try:
            conn, addr = server.accept()
            t = threading.Thread(target=handle_client, args=(conn, addr), daemon=True)
            t.start()
        except OSError:
            break  # Socket closed during shutdown
# ── Client API (used by Flask) ────────────────────────────────────────────────
def root_exec(cmd, timeout=30, stdin=None) -> dict:
    """Execute a command via the privileged daemon.
    This is the function Flask routes call instead of subprocess.run()
    when they need root privileges.
    Args:
        cmd: Command as string or list
        timeout: Max execution time
        stdin: Optional stdin data
    Returns:
        dict: {'ok': bool, 'stdout': str, 'stderr': str, 'code': int}
    Falls back to direct subprocess if daemon is not running.

    Fallback chain (order matters): daemon socket → direct execution when
    already root → non-interactive `sudo -n`.
    """
    if isinstance(cmd, str):
        import shlex
        cmd = shlex.split(cmd)  # shell-style string -> argv list
    # Try daemon first
    if os.path.exists(SOCKET_PATH):
        try:
            return _send_to_daemon(cmd, timeout, stdin)
        except (ConnectionRefusedError, FileNotFoundError, OSError):
            pass  # Daemon not running, fall through
    # Fallback: direct execution (works if we're already root)
    if os.geteuid() == 0:
        return execute_command(cmd, timeout=timeout, stdin_data=stdin)
    # Fallback: try with sudo (use original subprocess.run to avoid hook recursion)
    sudo_cmd = ['sudo', '-n'] + cmd  # -n = non-interactive
    # NOTE: stdin is NOT forwarded on this sudo path — only the daemon and
    # direct-root paths honour it.
    run_fn = _original_subprocess_run or subprocess.run
    try:
        result = run_fn(
            sudo_cmd, capture_output=True, text=True, timeout=timeout
        )
        return {
            'ok': result.returncode == 0,
            'stdout': result.stdout,
            'stderr': result.stderr,
            'code': result.returncode,
        }
    except Exception as e:
        return {'ok': False, 'stdout': '', 'stderr': f'No daemon, not root, sudo failed: {e}', 'code': -5}
def _send_to_daemon(cmd, timeout, stdin) -> dict:
    """Send a signed command to the daemon via Unix socket.

    Builds the JSON payload, wraps it in an HMAC-signed envelope when the
    shared secret is readable (unsigned fallback otherwise), then performs
    one request/response round trip.

    Raises OSError subclasses on connect/timeout failures; root_exec()
    catches those and falls back.
    """
    payload = json.dumps({'cmd': cmd, 'timeout': timeout, 'stdin': stdin})
    # Sign the request with HMAC if we have the shared secret
    secret = _load_daemon_secret()
    if secret:
        # Nonce carries a timestamp so the daemon can expire replays.
        nonce = f"{time.time()}:{secrets.token_hex(8)}"
        sig = _sign_request(payload.encode(), secret)
        envelope = json.dumps({'payload': payload, 'sig': sig, 'nonce': nonce})
    else:
        envelope = payload  # Unsigned fallback
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.settimeout(timeout + 5)  # Extra time for daemon processing
        sock.connect(SOCKET_PATH)
        sock.sendall((envelope + '\n').encode())
        # Read one newline-terminated JSON response
        data = b''
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break
            data += chunk
            if b'\n' in data:
                break
    finally:
        # BUG FIX: the previous version leaked the socket whenever
        # connect/sendall/recv raised (e.g. timeout) — always close it.
        sock.close()
    if not data:
        return {'ok': False, 'stdout': '', 'stderr': 'Empty response from daemon', 'code': -6}
    return json.loads(data.decode().strip())
# ── Global subprocess.run patch ───────────────────────────────────────────────
# Call install_subprocess_hook() once at startup to make ALL subprocess.run()
# calls with ['sudo', ...] auto-route through the daemon. This means we never
# miss a sudo call — even in third-party code or modules we haven't touched.
_original_subprocess_run = None  # saved subprocess.run; restored by uninstall_subprocess_hook()
_hook_installed = False  # guards against double installation
def _patched_subprocess_run(cmd, *args, **kwargs):
    """Drop-in replacement for subprocess.run that intercepts sudo commands.

    List/tuple commands starting with 'sudo' are routed through the
    privileged daemon via root_exec(); everything else falls through to the
    original subprocess.run unchanged. Honors text/universal_newlines,
    check=, timeout= and input= the way subprocess.run does.
    """
    # Only intercept list commands starting with 'sudo'
    if isinstance(cmd, (list, tuple)) and len(cmd) > 1 and cmd[0] == 'sudo':
        actual_cmd = list(cmd[1:])
        # Strip sudo's -n/-E flags — irrelevant, the daemon is already root.
        while actual_cmd and actual_cmd[0] in ('-n', '-E'):
            actual_cmd = actual_cmd[1:]
        timeout = kwargs.get('timeout', 30)
        input_data = kwargs.get('input')
        r = root_exec(actual_cmd, timeout=timeout, stdin=input_data)
        # BUG FIX: the old check also treated capture_output=True as text
        # mode, but subprocess.run only returns str when text/
        # universal_newlines is set — bytes-expecting callers got str.
        text_mode = kwargs.get('text') or kwargs.get('universal_newlines')
        stdout = r['stdout'] if text_mode else r['stdout'].encode()
        stderr = r['stderr'] if text_mode else r['stderr'].encode()
        # Return a subprocess.CompletedProcess to match the expected interface
        result = subprocess.CompletedProcess(
            args=cmd,
            returncode=r['code'],
            stdout=stdout,
            stderr=stderr,
        )
        # If check=True was passed, raise on non-zero
        if kwargs.get('check') and result.returncode != 0:
            raise subprocess.CalledProcessError(
                result.returncode, cmd, result.stdout, result.stderr
            )
        return result
    # Not a sudo command — pass through to original subprocess.run
    return _original_subprocess_run(cmd, *args, **kwargs)
def install_subprocess_hook():
    """Install the global subprocess.run hook that intercepts sudo calls.

    Idempotent: only the first call swaps the function; later calls are
    no-ops. Call once at startup (e.g., in autarch.py or web/app.py).
    """
    global _original_subprocess_run, _hook_installed
    if _hook_installed:
        return
    _original_subprocess_run, subprocess.run = subprocess.run, _patched_subprocess_run
    _hook_installed = True
    _log.info('[Daemon] subprocess.run hook installed — sudo calls auto-route through daemon')
def uninstall_subprocess_hook():
    """Remove the hook and restore original subprocess.run."""
    global _hook_installed
    if _hook_installed and _original_subprocess_run:
        subprocess.run = _original_subprocess_run
        _hook_installed = False
def is_daemon_running() -> bool:
    """Return True when the daemon socket exists and accepts a connection."""
    if not os.path.exists(SOCKET_PATH):
        return False
    try:
        probe = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        probe.settimeout(2)
        probe.connect(SOCKET_PATH)
    except (ConnectionRefusedError, FileNotFoundError, OSError):
        return False
    probe.close()
    return True
if __name__ == '__main__':
    # Entry point: blocks forever serving requests; run_daemon() itself
    # enforces that we were started as root.
    run_daemon()

View File

@@ -20,6 +20,8 @@ import logging
from pathlib import Path
from typing import Dict, Optional, Tuple
from core.daemon import root_exec
logger = logging.getLogger(__name__)
# Service constants
@@ -207,20 +209,11 @@ class DiscoveryManager:
"""Enable Bluetooth authentication/security on the adapter."""
try:
# Enable authentication
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'auth'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'auth'], timeout=5)
# Enable encryption
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'encrypt'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'encrypt'], timeout=5)
# Enable SSP (Secure Simple Pairing)
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'sspmode', '1'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'sspmode', '1'], timeout=5)
if self._bt_is_secure():
return True, "Bluetooth security enabled (AUTH + ENCRYPT + SSP)"
return False, "Security flags set but AUTH not confirmed"
@@ -246,10 +239,7 @@ class DiscoveryManager:
# Ensure adapter is up
try:
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'up'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'up'], timeout=5)
except Exception:
pass
@@ -263,16 +253,10 @@ class DiscoveryManager:
# Make discoverable and set name
try:
# Set device name
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'name', BT_SERVICE_NAME],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'name', BT_SERVICE_NAME], timeout=5)
# Enable discoverable mode
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'piscan'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'piscan'], timeout=5)
# Use bluetoothctl to set discoverable with timeout 0 (always)
# and set the alias
@@ -339,10 +323,7 @@ class DiscoveryManager:
['bluetoothctl', 'discoverable', 'off'],
capture_output=True, text=True, timeout=5
)
subprocess.run(
['sudo', 'hciconfig', 'hci0', 'noscan'],
capture_output=True, text=True, timeout=5
)
root_exec(['hciconfig', 'hci0', 'noscan'], timeout=5)
if self._bt_thread:
self._bt_thread.join(timeout=3)

267
core/hal_analyst.py Normal file
View File

@@ -0,0 +1,267 @@
"""
AUTARCH HAL Analyst
Automatically analyzes tool output via the loaded LLM.
When a defensive/analysis tool produces output, this module sends it to
the active LLM backend for analysis. HAL identifies issues, explains
what the user is looking at, and optionally suggests fixes.
Usage:
from core.hal_analyst import analyze_output
result = analyze_output('log_analyzer', log_text, context='syslog')
"""
import json
import logging
import time
from typing import Optional
_log = logging.getLogger('autarch.hal_analyst')
# Categories that should NOT get auto-analysis (offensive tools)
EXCLUDED_BLUEPRINTS = {
    'offense', 'loadtest', 'phishmail', 'social_eng', 'hack_hijack',
    'c2_framework', 'deauth', 'pineapple', 'exploit_dev', 'sms_forge',
    'rcs_tools', 'starlink_hack', 'iphone_exploit',
}

# Prompts tailored per tool category
# Each value is a str.format template; analyze_output() fills the
# {tool_name}, {context} and {output} placeholders (not every template
# uses all three). Unknown categories fall back to 'default'.
ANALYSIS_PROMPTS = {
    'default': (
        "You are HAL, the AUTARCH security analyst. Analyze the following tool output. "
        "Be concise but thorough. Structure your response as:\n"
        "1. **Summary**: One sentence about what this data shows\n"
        "2. **Findings**: List any issues, anomalies, or notable items\n"
        "3. **Risk Level**: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. **Recommendation**: What the user should do (if anything)\n\n"
        "Tool: {tool_name}\n"
        "Context: {context}\n\n"
        "--- OUTPUT ---\n{output}\n--- END ---"
    ),
    'log_analysis': (
        "You are HAL, the AUTARCH security analyst. Analyze these system logs for security issues. "
        "Look for: failed login attempts, privilege escalation, suspicious processes, "
        "unusual network connections, file permission changes, service failures, "
        "and any indicators of compromise.\n\n"
        "Be specific about line numbers or timestamps where issues appear.\n\n"
        "Structure your response as:\n"
        "1. **Summary**: What these logs show\n"
        "2. **Issues Found**: Specific problems with details\n"
        "3. **Risk Level**: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. **Fix**: Exact commands or steps to resolve each issue\n\n"
        "--- LOGS ---\n{output}\n--- END ---"
    ),
    'network': (
        "You are HAL, the AUTARCH network security analyst. Analyze this network data. "
        "Look for: suspicious connections, unusual ports, potential backdoors, "
        "ARP anomalies, rogue devices, and any signs of intrusion.\n\n"
        "Structure your response as:\n"
        "1. **Summary**: Network status overview\n"
        "2. **Findings**: Suspicious items with details\n"
        "3. **Risk Level**: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. **Action**: Commands to investigate or fix issues\n\n"
        "Tool: {tool_name}\n\n"
        "--- DATA ---\n{output}\n--- END ---"
    ),
    # 'defense' deliberately tells the model NOT to widen scope: single-check
    # output (firewall only, SSH only) should not trigger full-audit advice.
    'defense': (
        "You are HAL, the AUTARCH defensive security analyst. "
        "Analyze ONLY the specific output provided below. Do NOT expand scope beyond what was tested. "
        "If this is a single check (firewall only, SSH only, etc.), only comment on that one check. "
        "Do NOT perform or suggest a full system audit unless the output contains multiple checks.\n\n"
        "Keep your response short and focused on the actual data shown.\n\n"
        "Structure:\n"
        "1. Summary (one sentence)\n"
        "2. Issues found (if any)\n"
        "3. Risk Level: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. Fix commands (only for issues found in THIS output)\n\n"
        "Tool: {tool_name}\nContext: {context}\n\n"
        "--- OUTPUT ---\n{output}\n--- END ---"
    ),
    'counter': (
        "You are HAL, the AUTARCH threat analyst. Analyze this threat detection output. "
        "Look for active compromises, persistent threats, backdoors, rootkits, "
        "and indicators of compromise.\n\n"
        "Be urgent and specific about any active threats found.\n\n"
        "Structure your response as:\n"
        "1. **Summary**: Threat landscape\n"
        "2. **Active Threats**: Any confirmed or suspected compromises\n"
        "3. **Risk Level**: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. **Immediate Action**: Steps to contain and remediate\n\n"
        "--- DATA ---\n{output}\n--- END ---"
    ),
    'android': (
        "You are HAL, the AUTARCH mobile security analyst. Analyze this Android device output. "
        "Look for: suspicious apps, dangerous permissions, stalkerware indicators, "
        "root detection, SELinux status, unusual processes, and security misconfigurations.\n\n"
        "Structure your response as:\n"
        "1. Summary: What this data shows\n"
        "2. Findings: Issues or notable items\n"
        "3. Risk Level: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. Fix: Exact adb or device commands to resolve issues\n\n"
        "Tool: {tool_name}\nContext: {context}\n\n"
        "--- OUTPUT ---\n{output}\n--- END ---"
    ),
    'analyze': (
        "You are HAL, the AUTARCH forensics analyst. Analyze this forensic data. "
        "Look for malware indicators, suspicious strings, anomalous file properties, "
        "and any signs of tampering or malicious content.\n\n"
        "Structure your response as:\n"
        "1. **Summary**: What this data represents\n"
        "2. **Findings**: Notable or suspicious items\n"
        "3. **Risk Level**: CLEAN / LOW / MEDIUM / HIGH / CRITICAL\n"
        "4. **Recommendation**: Further analysis or actions needed\n\n"
        "Tool: {tool_name}\n\n"
        "--- DATA ---\n{output}\n--- END ---"
    ),
}
def is_llm_available() -> bool:
    """Report whether an LLM backend is currently loaded and ready.

    Returns False on any failure (missing core.llm module, no backend
    configured, backend not loaded) rather than raising.
    """
    try:
        from core.llm import get_llm
        backend = get_llm()
        return backend is not None and backend.is_loaded
    except Exception:
        return False
def analyze_output(
    tool_name: str,
    output: str,
    context: str = '',
    category: str = 'default',
    max_output_chars: int = 8000,
) -> dict:
    """Send tool output to the loaded LLM for analysis.

    Args:
        tool_name: Name of the tool that produced the output
        output: The raw output text to analyze
        context: Additional context (e.g., 'syslog', 'auth.log', 'ARP table')
        category: Analysis category for prompt selection (key into
            ANALYSIS_PROMPTS; unknown categories fall back to 'default')
        max_output_chars: Truncate output to this length to fit context windows

    Returns:
        dict with keys:
            available (bool): Whether LLM was available
            analysis (str): The LLM's analysis text
            risk_level (str): Extracted risk level (CLEAN/LOW/MEDIUM/HIGH/CRITICAL)
            has_fixes (bool): Whether the analysis contains fix commands
            tool_name (str): Echo back the tool name
    """
    # Default result shape — returned as-is on every early-exit path.
    result = {
        'available': False,
        'analysis': '',
        'risk_level': 'unknown',
        'has_fixes': False,
        'tool_name': tool_name,
    }
    if not output or not output.strip():
        result['analysis'] = 'No output to analyze.'
        return result
    # Check LLM
    try:
        from core.llm import get_llm
        llm = get_llm()
        if not llm or not llm.is_loaded:
            result['analysis'] = 'No LLM loaded — enable a model in LLM Settings to get AI analysis.'
            return result
    except Exception as e:
        _log.debug(f"[HAL] LLM not available: {e}")
        result['analysis'] = f'LLM not available: {e}'
        return result
    result['available'] = True
    # Truncate output if too long
    # NOTE: the f-string evaluates len(output) before the reassignment, so the
    # reported total is the original (untruncated) length.
    if len(output) > max_output_chars:
        output = output[:max_output_chars] + f'\n\n... [truncated — {len(output)} chars total]'
    # Select prompt template
    prompt_template = ANALYSIS_PROMPTS.get(category, ANALYSIS_PROMPTS['default'])
    prompt = prompt_template.format(
        tool_name=tool_name,
        output=output,
        context=context or 'general',
    )
    # Detect current OS for context
    import platform as _plat
    _os_name = _plat.system()
    _os_detail = _plat.platform()
    # Send to LLM
    try:
        _log.info(f"[HAL] Analyzing output from {tool_name} ({len(output)} chars, category={category})")
        start = time.time()
        # assumes llm.chat returns the response as a str — TODO confirm
        # against the core.llm backend interface.
        response = llm.chat(prompt, system_prompt=(
            "You are HAL, the AI security analyst for the AUTARCH platform. "
            f"This system is running {_os_name} ({_os_detail}). "
            "ONLY suggest commands for this operating system. "
            "If the tool output is from the WRONG platform (e.g. Windows scan results on a Linux host), "
            "immediately tell the user they ran the wrong scan and point them to the correct one. "
            "Do NOT use markdown formatting. Plain text only. No ** or ## or ``` or bullet points. "
            "Be specific, cite evidence from the data, and provide exact commands to fix issues."
        ))
        elapsed = time.time() - start
        _log.info(f"[HAL] Analysis complete in {elapsed:.1f}s ({len(response)} chars)")
        result['analysis'] = response
        # Extract risk level from response
        # Ordered most-severe-first so the worst level mentioned anywhere wins.
        for level in ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'CLEAN']:
            if level in response.upper():
                result['risk_level'] = level.lower()
                break
        # Check if response contains fix commands
        # Heuristic substring scan — may false-positive on prose that merely
        # mentions these words followed by a space.
        result['has_fixes'] = any(x in response for x in [
            '```', 'sudo ', 'systemctl ', 'iptables ', 'chmod ', 'chown ',
            'apt ', 'ufw ', 'sshd_config', 'Fix:', 'fix:', 'Command:',
            'adb ', 'fastboot ', 'pm ', 'am ', 'dumpsys ', 'settings put ',
        ])
    except Exception as e:
        _log.error(f"[HAL] Analysis failed: {e}", exc_info=True)
        result['analysis'] = f'Analysis failed: {e}'
    return result
def extract_fix_commands(analysis: str) -> list:
    """Pull actionable shell commands out of an analysis response.

    Collects non-comment lines found inside ``` fenced code blocks, plus
    inline lines that begin with 'sudo ', '$ ' or '# '. An unterminated
    trailing fence is discarded. Returns commands in order of appearance.
    """
    found = []
    fence_open = False
    pending = []
    for raw_line in analysis.split('\n'):
        text = raw_line.strip()
        # Fence delimiter: flush the buffered block when closing.
        if text.startswith('```'):
            if fence_open and pending:
                found.extend(pending)
            pending = []
            fence_open = not fence_open
            continue
        if fence_open:
            # Inside a fence: keep everything except blank lines and comments.
            if text and not text.startswith('#'):
                pending.append(text)
        elif len(text) > 5 and text.startswith(('sudo ', '$ ', '# ')):
            # Inline command after a shell-style prompt marker.
            candidate = text.lstrip('$# ').strip()
            if candidate:
                found.append(candidate)
    return found

217
core/hal_memory.py Normal file
View File

@@ -0,0 +1,217 @@
"""
AUTARCH HAL Memory Cache
Encrypted conversation history for the HAL AI agent.
Stores all HAL conversations in an AES-encrypted file.
Only the AI agent system can read them — the decryption key is
derived from the machine ID + a HAL-specific salt, same pattern
as the vault but with a separate keyspace.
Max size: configurable, default 4GB. Trims oldest entries when exceeded.
Usage:
from core.hal_memory import get_hal_memory
mem = get_hal_memory()
mem.add('user', 'scan my network')
mem.add('hal', 'Running nmap on 10.0.0.0/24...')
history = mem.get_history(last_n=50)
mem.add_context('scan_result', {'tool': 'nmap', 'output': '...'})
"""
import hashlib
import json
import logging
import os
import struct
import time
from pathlib import Path
from typing import Optional
_log = logging.getLogger('autarch.hal_memory')

# On-disk location of the encrypted memory file: <package root>/data/hal_memory.enc
_MEMORY_DIR = Path(__file__).parent.parent / 'data'
_MEMORY_FILE = _MEMORY_DIR / 'hal_memory.enc'
# File header: 4-byte magic + 1-byte format version, followed by salt, IV, ciphertext.
_MEMORY_MAGIC = b'HALM'
_MEMORY_VERSION = 1
# Cap on the serialized plaintext size before the oldest entries are trimmed.
_DEFAULT_MAX_BYTES = 4 * 1024 * 1024 * 1024  # 4GB
def _derive_key(salt: bytes) -> bytes:
"""Derive AES key from machine identity + HAL-specific material."""
machine_id = b''
for path in ('/etc/machine-id', '/var/lib/dbus/machine-id'):
try:
with open(path) as f:
machine_id = f.read().strip().encode()
break
except Exception:
continue
if not machine_id:
import socket
machine_id = f"hal-{socket.gethostname()}".encode()
return hashlib.pbkdf2_hmac('sha256', machine_id + b'HAL-MEMORY-KEY', salt, 100_000, dklen=32)
def _encrypt(plaintext: bytes, key: bytes) -> tuple:
iv = os.urandom(16)
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7
padder = PKCS7(128).padder()
padded = padder.update(plaintext) + padder.finalize()
enc = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
return iv, enc.update(padded) + enc.finalize()
except ImportError:
pass
try:
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
return iv, AES.new(key, AES.MODE_CBC, iv).encrypt(pad(plaintext, 16))
except ImportError:
raise RuntimeError('No crypto backend available')
def _decrypt(iv: bytes, ciphertext: bytes, key: bytes) -> bytes:
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7
dec = Cipher(algorithms.AES(key), modes.CBC(iv)).decryptor()
padded = dec.update(ciphertext) + dec.finalize()
return PKCS7(128).unpadder().update(padded) + PKCS7(128).unpadder().finalize()
except ImportError:
pass
try:
from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad
return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext), 16)
except ImportError:
raise RuntimeError('No crypto backend available')
class HalMemory:
    """Encrypted conversation memory for HAL.

    Entries are kept in memory as a list of dicts (role/content/timestamp,
    optional metadata) and persisted AES-encrypted to _MEMORY_FILE. All
    load/save failures are logged and degrade to an empty memory rather
    than raising.
    """

    def __init__(self, max_bytes: int = _DEFAULT_MAX_BYTES):
        # Cap on the serialized (plaintext JSON) size before trimming.
        self._max_bytes = max_bytes
        # Per-file random salt for key derivation; regenerated on a fresh start.
        self._salt = b''
        # In-memory conversation entries, oldest first.
        self._entries = []
        self._load()

    def _load(self):
        """Read and decrypt the on-disk memory file into self._entries.

        Any failure (missing file, wrong magic, decrypt/JSON error) resets
        to an empty memory with a fresh salt instead of raising.
        """
        if not _MEMORY_FILE.exists():
            self._salt = os.urandom(32)
            self._entries = []
            return
        try:
            with open(_MEMORY_FILE, 'rb') as f:
                # File layout: 4-byte magic, 1-byte version, 32-byte salt,
                # 16-byte IV, then AES-CBC ciphertext (mirrors _save).
                magic = f.read(4)
                if magic != _MEMORY_MAGIC:
                    self._salt = os.urandom(32)
                    self._entries = []
                    return
                f.read(1)  # version — read past it; currently unused on load
                self._salt = f.read(32)
                iv = f.read(16)
                ciphertext = f.read()
                key = _derive_key(self._salt)
                plaintext = _decrypt(iv, ciphertext, key)
                self._entries = json.loads(plaintext.decode('utf-8'))
                _log.info(f'[HAL Memory] Loaded {len(self._entries)} entries')
        except Exception as e:
            _log.error(f'[HAL Memory] Load failed: {e}')
            self._salt = os.urandom(32)
            self._entries = []

    def _save(self):
        """Encrypt and write all entries to disk, trimming oldest when over cap."""
        try:
            _MEMORY_DIR.mkdir(parents=True, exist_ok=True)
            key = _derive_key(self._salt)
            plaintext = json.dumps(self._entries).encode('utf-8')
            # Trim if over max size
            # Keeps at least 10 entries; each pass drops the oldest quarter
            # and re-serializes to re-check the size.
            while len(plaintext) > self._max_bytes and len(self._entries) > 10:
                self._entries = self._entries[len(self._entries) // 4:]  # Drop oldest 25%
                plaintext = json.dumps(self._entries).encode('utf-8')
                _log.info(f'[HAL Memory] Trimmed to {len(self._entries)} entries ({len(plaintext)} bytes)')
            iv, ciphertext = _encrypt(plaintext, key)
            with open(_MEMORY_FILE, 'wb') as f:
                f.write(_MEMORY_MAGIC)
                f.write(struct.pack('B', _MEMORY_VERSION))
                f.write(self._salt)
                f.write(iv)
                f.write(ciphertext)
            # Owner-only permissions: the file holds private conversation history.
            os.chmod(_MEMORY_FILE, 0o600)
        except Exception as e:
            _log.error(f'[HAL Memory] Save failed: {e}')

    def add(self, role: str, content: str, metadata: Optional[dict] = None):
        """Add a conversation entry; persists every 20th entry."""
        entry = {
            'role': role,
            'content': content,
            'timestamp': time.time(),
        }
        if metadata:
            entry['metadata'] = metadata
        self._entries.append(entry)
        # Auto-save every 20 entries
        if len(self._entries) % 20 == 0:
            self._save()

    def add_context(self, context_type: str, data: dict):
        """Add a context entry (scan result, fix result, IR, etc.).

        Stored as role 'context' with the payload JSON-encoded in content.
        """
        self.add('context', json.dumps(data), metadata={'type': context_type})

    def get_history(self, last_n: int = 50) -> list:
        """Get the most recent last_n conversation entries (oldest first)."""
        return self._entries[-last_n:] if self._entries else []

    def get_full_history(self) -> list:
        """Get all entries (the live list, not a copy)."""
        return self._entries

    def search(self, query: str, max_results: int = 20) -> list:
        """Search memory for entries containing query (case-insensitive, newest first)."""
        query_lower = query.lower()
        results = []
        for entry in reversed(self._entries):
            if query_lower in entry.get('content', '').lower():
                results.append(entry)
            if len(results) >= max_results:
                break
        return results

    def clear(self):
        """Clear all memory and persist the empty state."""
        self._entries = []
        self._save()

    def save(self):
        """Force save to disk."""
        self._save()

    def stats(self) -> dict:
        """Get memory stats: entry count, plaintext size, cap, and percent used."""
        total_bytes = len(json.dumps(self._entries).encode())
        return {
            'entries': len(self._entries),
            'bytes': total_bytes,
            'max_bytes': self._max_bytes,
            'percent_used': round(total_bytes / self._max_bytes * 100, 2) if self._max_bytes else 0,
        }
# Singleton
_instance: Optional[HalMemory] = None


def get_hal_memory(max_bytes: int = None) -> HalMemory:
    """Return the process-wide HalMemory singleton, creating it on first call.

    On first use the size cap comes from the explicit argument if given,
    otherwise from the 'hal_memory.max_bytes' config key. Subsequent calls
    return the existing instance and ignore max_bytes.
    """
    global _instance
    if _instance is not None:
        return _instance
    from core.config import get_config
    config = get_config()
    if max_bytes is None:
        max_bytes = config.get_int('hal_memory', 'max_bytes', _DEFAULT_MAX_BYTES)
    _instance = HalMemory(max_bytes=max_bytes)
    return _instance

View File

@@ -188,14 +188,15 @@ def _run_nmap(args: dict, config) -> str:
cmd.append(target)
try:
result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
nmap_timeout = config.get_int('mcp', 'nmap_timeout', 120)
result = subprocess.run(cmd, capture_output=True, text=True, timeout=nmap_timeout)
return json.dumps({
'stdout': result.stdout,
'stderr': result.stderr,
'exit_code': result.returncode
})
except subprocess.TimeoutExpired:
return json.dumps({'error': 'Scan timed out after 120 seconds'})
return json.dumps({'error': f'Scan timed out after {nmap_timeout} seconds'})
except Exception as e:
return json.dumps({'error': str(e)})
@@ -207,8 +208,11 @@ def _run_geoip(args: dict) -> str:
try:
import urllib.request
url = f"http://ip-api.com/json/{ip}?fields=status,message,country,regionName,city,zip,lat,lon,timezone,isp,org,as,query"
with urllib.request.urlopen(url, timeout=10) as resp:
config = get_config()
geoip_endpoint = config.get('mcp', 'geoip_endpoint', 'http://ip-api.com/json/')
geoip_timeout = config.get_int('mcp', 'geoip_timeout', 10)
url = f"{geoip_endpoint}{ip}?fields=status,message,country,regionName,city,zip,lat,lon,timezone,isp,org,as,query"
with urllib.request.urlopen(url, timeout=geoip_timeout) as resp:
return resp.read().decode()
except Exception as e:
return json.dumps({'error': str(e)})
@@ -219,11 +223,13 @@ def _run_dns(args: dict) -> str:
if not domain:
return json.dumps({'error': 'domain is required'})
config = get_config()
dns_timeout = config.get_int('mcp', 'dns_timeout', 10)
record_type = args.get('record_type', 'A')
try:
result = subprocess.run(
['dig', '+short', domain, record_type],
capture_output=True, text=True, timeout=10
capture_output=True, text=True, timeout=dns_timeout
)
records = [r for r in result.stdout.strip().split('\n') if r]
return json.dumps({'domain': domain, 'type': record_type, 'records': records})
@@ -244,10 +250,12 @@ def _run_whois(args: dict) -> str:
if not target:
return json.dumps({'error': 'target is required'})
config = get_config()
whois_timeout = config.get_int('mcp', 'whois_timeout', 15)
try:
result = subprocess.run(
['whois', target],
capture_output=True, text=True, timeout=15
capture_output=True, text=True, timeout=whois_timeout
)
return json.dumps({'target': target, 'output': result.stdout[:4000]})
except FileNotFoundError:
@@ -274,7 +282,9 @@ def _run_tcpdump(args: dict) -> str:
cmd.append(bpf_filter)
try:
result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
config = get_config()
tcpdump_timeout = config.get_int('mcp', 'tcpdump_timeout', 30)
result = subprocess.run(cmd, capture_output=True, text=True, timeout=tcpdump_timeout)
return json.dumps({
'stdout': result.stdout,
'stderr': result.stderr,
@@ -428,68 +438,95 @@ def create_mcp_server():
"""Create and return the FastMCP server instance."""
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("autarch", instructions="AUTARCH security framework tools")
config = get_config()
mcp_settings = config.get_mcp_settings()
fastmcp_kwargs = {
'instructions': mcp_settings['instructions'],
}
if mcp_settings['log_level']:
fastmcp_kwargs['log_level'] = mcp_settings['log_level']
if mcp_settings['mask_errors']:
fastmcp_kwargs['mask_error_details'] = True
if mcp_settings['rate_limit']:
fastmcp_kwargs['rate_limit'] = mcp_settings['rate_limit']
mcp = FastMCP("autarch", **fastmcp_kwargs)
# Filter out disabled tools
disabled = set(t.strip() for t in mcp_settings['disabled_tools'].split(',') if t.strip())
# Register all tools
tool_defs = get_autarch_tools()
@mcp.tool()
def nmap_scan(target: str, ports: str = "", scan_type: str = "quick") -> str:
"""Run an nmap network scan against a target. Returns scan results including open ports and services."""
return execute_tool('nmap_scan', {'target': target, 'ports': ports, 'scan_type': scan_type})
if 'nmap_scan' not in disabled:
@mcp.tool()
def nmap_scan(target: str, ports: str = "", scan_type: str = "quick") -> str:
"""Run an nmap network scan against a target. Returns scan results including open ports and services."""
return execute_tool('nmap_scan', {'target': target, 'ports': ports, 'scan_type': scan_type})
@mcp.tool()
def geoip_lookup(ip: str) -> str:
"""Look up geographic and network information for an IP address."""
return execute_tool('geoip_lookup', {'ip': ip})
if 'geoip_lookup' not in disabled:
@mcp.tool()
def geoip_lookup(ip: str) -> str:
"""Look up geographic and network information for an IP address."""
return execute_tool('geoip_lookup', {'ip': ip})
@mcp.tool()
def dns_lookup(domain: str, record_type: str = "A") -> str:
"""Perform DNS lookups for a domain. Supports A, AAAA, MX, NS, TXT, CNAME, SOA record types."""
return execute_tool('dns_lookup', {'domain': domain, 'record_type': record_type})
if 'dns_lookup' not in disabled:
@mcp.tool()
def dns_lookup(domain: str, record_type: str = "A") -> str:
"""Perform DNS lookups for a domain. Supports A, AAAA, MX, NS, TXT, CNAME, SOA record types."""
return execute_tool('dns_lookup', {'domain': domain, 'record_type': record_type})
@mcp.tool()
def whois_lookup(target: str) -> str:
"""Perform WHOIS lookup for a domain or IP address."""
return execute_tool('whois_lookup', {'target': target})
if 'whois_lookup' not in disabled:
@mcp.tool()
def whois_lookup(target: str) -> str:
"""Perform WHOIS lookup for a domain or IP address."""
return execute_tool('whois_lookup', {'target': target})
@mcp.tool()
def packet_capture(interface: str = "", count: int = 10, filter: str = "") -> str:
"""Capture network packets using tcpdump. Returns captured packet summary."""
return execute_tool('packet_capture', {'interface': interface, 'count': count, 'filter': filter})
if 'packet_capture' not in disabled:
@mcp.tool()
def packet_capture(interface: str = "", count: int = 10, filter: str = "") -> str:
"""Capture network packets using tcpdump. Returns captured packet summary."""
return execute_tool('packet_capture', {'interface': interface, 'count': count, 'filter': filter})
@mcp.tool()
def wireguard_status() -> str:
"""Get WireGuard VPN tunnel status and peer information."""
return execute_tool('wireguard_status', {})
if 'wireguard_status' not in disabled:
@mcp.tool()
def wireguard_status() -> str:
"""Get WireGuard VPN tunnel status and peer information."""
return execute_tool('wireguard_status', {})
@mcp.tool()
def upnp_status() -> str:
"""Get UPnP port mapping status."""
return execute_tool('upnp_status', {})
if 'upnp_status' not in disabled:
@mcp.tool()
def upnp_status() -> str:
"""Get UPnP port mapping status."""
return execute_tool('upnp_status', {})
@mcp.tool()
def system_info() -> str:
"""Get AUTARCH system information: hostname, platform, uptime, tool availability."""
return execute_tool('system_info', {})
if 'system_info' not in disabled:
@mcp.tool()
def system_info() -> str:
"""Get AUTARCH system information: hostname, platform, uptime, tool availability."""
return execute_tool('system_info', {})
@mcp.tool()
def llm_chat(message: str, system_prompt: str = "") -> str:
"""Send a message to the currently configured LLM backend and get a response."""
args = {'message': message}
if system_prompt:
args['system_prompt'] = system_prompt
return execute_tool('llm_chat', args)
if 'llm_chat' not in disabled:
@mcp.tool()
def llm_chat(message: str, system_prompt: str = "") -> str:
"""Send a message to the currently configured LLM backend and get a response."""
args = {'message': message}
if system_prompt:
args['system_prompt'] = system_prompt
return execute_tool('llm_chat', args)
@mcp.tool()
def android_devices() -> str:
"""List connected Android devices via ADB."""
return execute_tool('android_devices', {})
if 'android_devices' not in disabled:
@mcp.tool()
def android_devices() -> str:
"""List connected Android devices via ADB."""
return execute_tool('android_devices', {})
@mcp.tool()
def config_get(section: str, key: str) -> str:
"""Get an AUTARCH configuration value. Sensitive keys (api_key, password) are blocked."""
return execute_tool('config_get', {'section': section, 'key': key})
if 'config_get' not in disabled:
@mcp.tool()
def config_get(section: str, key: str) -> str:
"""Get an AUTARCH configuration value. Sensitive keys (api_key, password) are blocked."""
return execute_tool('config_get', {'section': section, 'key': key})
return mcp
@@ -502,6 +539,12 @@ def run_stdio():
def run_sse(host: str = '0.0.0.0', port: int = 8081):
"""Run the MCP server in SSE (Server-Sent Events) mode for web clients."""
config = get_config()
mcp_settings = config.get_mcp_settings()
if host == '0.0.0.0':
host = mcp_settings['host']
if port == 8081:
port = mcp_settings['port']
mcp = create_mcp_server()
mcp.run(transport='sse', host=host, port=port)
@@ -535,6 +578,13 @@ def start_sse_server(host: str = '0.0.0.0', port: int = 8081) -> dict:
"""Start the MCP SSE server in the background."""
global _server_process
config = get_config()
mcp_settings = config.get_mcp_settings()
if host == '0.0.0.0':
host = mcp_settings['host']
if port == 8081:
port = mcp_settings['port']
status = get_server_status()
if status['running']:
return {'ok': False, 'error': f'Already running (PID {status["pid"]})'}

View File

@@ -11,6 +11,7 @@ from typing import Dict, List, Optional, Callable
from .banner import Colors, display_banner, clear_screen
from .config import get_config
from core.daemon import root_exec
# Module categories
@@ -2437,30 +2438,32 @@ class MainMenu:
break
elif choice == "1" and not installed:
try:
subprocess.run(['sudo', 'cp', str(SERVICE_FILE), str(SYSTEMD_PATH)], check=True)
subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
r = root_exec(['cp', str(SERVICE_FILE), str(SYSTEMD_PATH)])
if not r['ok']: raise subprocess.CalledProcessError(r['code'], 'cp')
r = root_exec(['systemctl', 'daemon-reload'])
if not r['ok']: raise subprocess.CalledProcessError(r['code'], 'systemctl')
self.print_status("Service installed", "success")
except Exception as e:
self.print_status(f"Install failed: {e}", "error")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "1" and installed:
subprocess.run(['sudo', 'systemctl', 'start', SERVICE_NAME])
root_exec(['systemctl', 'start', SERVICE_NAME])
self.print_status("Service started", "success")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "2":
subprocess.run(['sudo', 'systemctl', 'stop', SERVICE_NAME])
root_exec(['systemctl', 'stop', SERVICE_NAME])
self.print_status("Service stopped", "success")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "3":
subprocess.run(['sudo', 'systemctl', 'restart', SERVICE_NAME])
root_exec(['systemctl', 'restart', SERVICE_NAME])
self.print_status("Service restarted", "success")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "4":
subprocess.run(['sudo', 'systemctl', 'enable', SERVICE_NAME])
root_exec(['systemctl', 'enable', SERVICE_NAME])
self.print_status("Auto-start enabled", "success")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "5":
subprocess.run(['sudo', 'systemctl', 'disable', SERVICE_NAME])
root_exec(['systemctl', 'disable', SERVICE_NAME])
self.print_status("Auto-start disabled", "success")
input(f"\n{Colors.WHITE} Press Enter...{Colors.RESET}")
elif choice == "6":

View File

@@ -24,6 +24,7 @@ except ImportError:
from .config import get_config
from .banner import Colors
from core.daemon import root_exec
class MSFError(Exception):
@@ -672,11 +673,11 @@ class MSFManager:
except PermissionError:
if use_sudo:
try:
subprocess.run(['sudo', 'kill', '-TERM', str(pid)], timeout=5)
root_exec(['kill', '-TERM', str(pid)], timeout=5)
time.sleep(1)
try:
os.kill(int(pid), 0)
subprocess.run(['sudo', 'kill', '-KILL', str(pid)], timeout=5)
root_exec(['kill', '-KILL', str(pid)], timeout=5)
except ProcessLookupError:
pass
return True
@@ -692,7 +693,7 @@ class MSFManager:
# Try pkill as fallback
try:
if use_sudo:
subprocess.run(['sudo', 'pkill', '-f', 'msfrpcd'], timeout=5)
root_exec(['pkill', '-f', 'msfrpcd'], timeout=5)
else:
subprocess.run(['pkill', '-f', 'msfrpcd'], timeout=5)
time.sleep(1)
@@ -810,9 +811,10 @@ class MSFManager:
if is_win and msfrpcd_bin.endswith('.bat'):
cmd = ['cmd', '/c'] + cmd
# Prepend sudo on Linux if requested
# Note: msfrpcd runs through the daemon if root is needed
# For Popen-based background processes, we don't use the daemon socket
if not is_win and use_sudo:
cmd = ['sudo'] + cmd
cmd = ['sudo', '-n'] + cmd # non-interactive sudo fallback
try:
# Start msfrpcd in background

230
core/vault.py Normal file
View File

@@ -0,0 +1,230 @@
"""
AUTARCH Secrets Vault
Encrypted storage for API keys, tokens, and sensitive credentials.
Stores secrets in data/vault.enc using AES-256-CBC with a machine-derived key.
The key is derived from a combination of:
- Machine ID (/etc/machine-id or hostname)
- The vault salt (random, stored alongside the ciphertext)
- PBKDF2-HMAC-SHA256 with 200,000 iterations
This means:
- Secrets are encrypted at rest (not plaintext in .conf files)
- The vault is tied to this machine (moving the file to another machine won't decrypt it)
- No master password needed for normal operation (machine identity IS the key)
- Optionally, a user-provided master password can be added for extra security
Usage:
from core.vault import get_vault
vault = get_vault()
# Store a secret
vault.set('claude_api_key', 'sk-ant-...')
vault.set('openai_api_key', 'sk-...')
# Retrieve a secret
key = vault.get('claude_api_key') # Returns '' if not set
# List stored keys (not values)
vault.keys() # ['claude_api_key', 'openai_api_key']
# Delete a secret
vault.delete('old_key')
"""
import hashlib
import json
import logging
import os
import secrets
import struct
from pathlib import Path
from typing import Optional
_log = logging.getLogger('autarch.vault')

# Vault file location: <package root>/data/vault.enc
_VAULT_DIR = Path(__file__).parent.parent / 'data'
_VAULT_FILE = _VAULT_DIR / 'vault.enc'
# File header: 4-byte magic + 1-byte format version, then salt, IV, ciphertext.
_VAULT_MAGIC = b'ATVL'  # AUTARCH VauLt
_VAULT_VERSION = 1
def _get_machine_id() -> bytes:
"""Get a stable machine identifier for key derivation."""
# Try /etc/machine-id (Linux, unique per install)
for path in ('/etc/machine-id', '/var/lib/dbus/machine-id'):
try:
with open(path) as f:
mid = f.read().strip()
if mid:
return mid.encode()
except (OSError, PermissionError):
continue
# Fallback: hostname + username + home dir (less unique but works everywhere)
import socket
fallback = f"{socket.gethostname()}:{os.getenv('USER', 'autarch')}:{Path.home()}"
return fallback.encode()
def _derive_key(salt: bytes, master_password: str = '') -> bytes:
    """Stretch machine identity (+ optional master password) into a 32-byte AES key.

    Uses PBKDF2-HMAC-SHA256 with 200,000 iterations over the given salt.
    """
    material = _get_machine_id() + master_password.encode()
    return hashlib.pbkdf2_hmac('sha256', material, salt, 200_000, dklen=32)
def _encrypt(plaintext: bytes, key: bytes) -> tuple:
"""Encrypt with AES-256-CBC. Returns (iv, ciphertext)."""
iv = os.urandom(16)
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7
padder = PKCS7(128).padder()
padded = padder.update(plaintext) + padder.finalize()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
enc = cipher.encryptor()
ct = enc.update(padded) + enc.finalize()
return iv, ct
except ImportError:
pass
try:
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
cipher = AES.new(key, AES.MODE_CBC, iv)
ct = cipher.encrypt(pad(plaintext, 16))
return iv, ct
except ImportError:
pass
raise RuntimeError('No crypto backend available (install cryptography or PyCryptodome)')
def _decrypt(iv: bytes, ciphertext: bytes, key: bytes) -> bytes:
    """Decrypt AES-256-CBC *ciphertext* and strip the PKCS7 padding.

    Mirrors _encrypt's backend order: pyca/cryptography first, then
    PyCryptodome; raises RuntimeError when neither is installed.
    """
    try:
        from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
        from cryptography.hazmat.primitives.padding import PKCS7
    except ImportError:
        pass
    else:
        decryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).decryptor()
        padded = decryptor.update(ciphertext) + decryptor.finalize()
        unpadder = PKCS7(128).unpadder()
        return unpadder.update(padded) + unpadder.finalize()
    try:
        from Crypto.Cipher import AES
        from Crypto.Util.Padding import unpad
    except ImportError:
        pass
    else:
        return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext), 16)
    raise RuntimeError('No crypto backend available')
class Vault:
    """Encrypted secrets vault.

    Secrets live in a plain dict in memory and are re-encrypted to disk
    (AES-256-CBC, key derived from the machine ID plus an optional master
    password) on every mutation.  On-disk layout:

        magic(4) | version(1) | salt(32) | iv(16) | ciphertext
    """

    def __init__(self, vault_path: Optional[Path] = None, master_password: str = ''):
        """Open the vault file, starting empty if it does not exist.

        Args:
            vault_path: Encrypted file location (defaults to _VAULT_FILE).
            master_password: Optional extra key material for derivation.
        """
        self._path = vault_path or _VAULT_FILE
        self._master = master_password
        self._secrets: dict = {}
        self._salt: bytes = b''
        self._load()

    def _reset(self):
        """Discard state: empty secret store plus a fresh random salt."""
        self._salt = os.urandom(32)
        self._secrets = {}

    def _load(self):
        """Load and decrypt the vault file.

        Any problem — missing file, bad magic, unsupported version, failed
        decryption — results in an empty vault rather than a crash.
        """
        if not self._path.exists():
            self._reset()
            return
        try:
            with open(self._path, 'rb') as f:
                magic = f.read(4)
                if magic != _VAULT_MAGIC:
                    _log.warning('[Vault] Invalid vault file — starting fresh')
                    self._reset()
                    return
                version = struct.unpack('B', f.read(1))[0]
                if version > _VAULT_VERSION:
                    # File written by a newer format we don't understand;
                    # don't attempt to parse a layout we can't verify.
                    _log.warning('[Vault] Unsupported vault version %d — starting fresh', version)
                    self._reset()
                    return
                self._salt = f.read(32)
                iv = f.read(16)
                ciphertext = f.read()
            key = _derive_key(self._salt, self._master)
            plaintext = _decrypt(iv, ciphertext, key)
            self._secrets = json.loads(plaintext.decode('utf-8'))
            _log.info(f'[Vault] Loaded {len(self._secrets)} secret(s)')
        except Exception as e:
            # Wrong machine/master password or corrupt file — start over.
            _log.error(f'[Vault] Failed to load vault: {e}')
            self._reset()

    def _save(self):
        """Encrypt and write the vault file (owner read/write only)."""
        try:
            self._path.parent.mkdir(parents=True, exist_ok=True)
            key = _derive_key(self._salt, self._master)
            plaintext = json.dumps(self._secrets).encode('utf-8')
            iv, ciphertext = _encrypt(plaintext, key)
            with open(self._path, 'wb') as f:
                f.write(_VAULT_MAGIC)
                f.write(struct.pack('B', _VAULT_VERSION))
                f.write(self._salt)
                f.write(iv)
                f.write(ciphertext)
            # Restrict permissions
            os.chmod(self._path, 0o600)
            _log.info(f'[Vault] Saved {len(self._secrets)} secret(s)')
        except Exception as e:
            _log.error(f'[Vault] Failed to save: {e}')
            raise

    def get(self, key: str, default: str = '') -> str:
        """Return the secret for *key*, or *default* if not stored."""
        return self._secrets.get(key, default)

    def set(self, key: str, value: str):
        """Store a secret value and persist immediately."""
        self._secrets[key] = value
        self._save()

    def delete(self, key: str):
        """Remove a secret (no-op if absent) and persist immediately."""
        self._secrets.pop(key, None)
        self._save()

    def keys(self) -> list:
        """Return the stored secret names (never their values)."""
        return list(self._secrets.keys())

    def has(self, key: str) -> bool:
        """Return True if *key* exists in the vault."""
        return key in self._secrets

    def export_masked(self) -> dict:
        """Return secrets with values masked for UI display.

        Values longer than 12 chars show their first 8 chars plus '...';
        shorter values are fully masked as '***'.
        """
        return {k: v[:8] + '...' if len(v) > 12 else '***' for k, v in self._secrets.items()}
# ── Singleton ─────────────────────────────────────────────────────────────────
_vault_instance: Optional[Vault] = None
def get_vault(master_password: str = '') -> Vault:
    """Return the process-wide Vault, creating it on first use.

    NOTE(review): master_password only takes effect on the first call;
    later calls return the existing instance regardless of the password
    supplied — confirm callers expect this.
    """
    global _vault_instance
    if _vault_instance is not None:
        return _vault_instance
    _vault_instance = Vault(master_password=master_password)
    return _vault_instance

View File

@@ -18,6 +18,7 @@ from pathlib import Path
from typing import Optional, Dict, List, Any, Tuple
from core.paths import get_data_dir, find_tool
from core.daemon import root_exec
class WireGuardManager:
@@ -59,20 +60,20 @@ class WireGuardManager:
return ('', str(e), 1)
def _run_wg_sudo(self, args, timeout=10):
"""Run wg command with sudo, return (stdout, stderr, rc)."""
"""Run wg command with root privileges via daemon, return (stdout, stderr, rc)."""
if not self._wg_bin:
return ('', 'wg binary not found', 1)
cmd = ['sudo', self._wg_bin] + args
try:
proc = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
return (proc.stdout, proc.stderr, proc.returncode)
except subprocess.TimeoutExpired:
return ('', 'Command timed out', 1)
except Exception as e:
return ('', str(e), 1)
cmd = [str(self._wg_bin)] + args
r = root_exec(cmd, timeout=timeout)
return (r['stdout'], r['stderr'], r['code'])
def _run_cmd(self, cmd, timeout=10, input_data=None):
"""Run arbitrary command, return (stdout, stderr, rc)."""
"""Run arbitrary command, return (stdout, stderr, rc).
Commands starting with 'sudo' are routed through the privileged daemon."""
# Strip sudo and route through daemon
if cmd and cmd[0] == 'sudo':
r = root_exec(cmd[1:], timeout=timeout, stdin=input_data)
return (r['stdout'], r['stderr'], r['code'])
try:
proc = subprocess.run(
cmd, capture_output=True, text=True,

View File

@@ -59,14 +59,19 @@ class WiresharkManager:
@property
def can_capture(self):
"""Check if live capture is possible (needs root + libpcap)."""
if not SCAPY_AVAILABLE:
return False
"""Check if live capture is possible (via capture agent, daemon, or root)."""
# Check daemon (handles capture via __capture__ builtin)
try:
from core.daemon import is_daemon_running
if is_daemon_running():
return True
except Exception:
pass
# Check root
try:
return os.geteuid() == 0
except AttributeError:
# Windows - check differently
return True
return True # Windows — assume capture is possible
def get_status(self) -> Dict[str, Any]:
"""Get engine status."""
@@ -165,23 +170,84 @@ class WiresharkManager:
def _do_capture():
try:
kwargs = {
'timeout': duration,
'prn': self._packet_handler,
'store': True,
}
if interface:
kwargs['iface'] = interface
if bpf_filter:
kwargs['filter'] = bpf_filter
# Method 1: Direct scapy capture (if root)
is_root = False
try:
is_root = os.geteuid() == 0
except AttributeError:
pass
packets = sniff(**kwargs)
self._last_packets = packets
# Save to pcap
if self._capture_file and packets:
wrpcap(self._capture_file, packets)
self._capture_stats['output_file'] = self._capture_file
if is_root:
kwargs = {'timeout': duration, 'prn': self._packet_handler, 'store': True}
if interface:
kwargs['iface'] = interface
if bpf_filter:
kwargs['filter'] = bpf_filter
packets = sniff(**kwargs)
self._last_packets = packets
if self._capture_file and packets:
wrpcap(self._capture_file, packets)
self._capture_stats['output_file'] = self._capture_file
else:
# Method 3: scapy via daemon (daemon runs as root, can sniff)
from core.daemon import root_exec
r = root_exec(
['__capture__'],
timeout=duration + 10,
stdin=json.dumps({
'interface': interface or '',
'filter': bpf_filter or '',
'duration': duration,
'max_packets': 500,
'file': self._capture_file,
})
)
# root_exec sends __capture__ as the cmd, but the daemon
# handles it as a built-in — it reads params from the request
# Let's call it properly through the socket
import socket as _sock
DAEMON_SOCK = '/var/run/autarch-daemon.sock'
if os.path.exists(DAEMON_SOCK):
sock = _sock.socket(_sock.AF_UNIX, _sock.SOCK_STREAM)
sock.settimeout(duration + 15)
sock.connect(DAEMON_SOCK)
payload = json.dumps({
'cmd': ['__capture__'],
'interface': interface or '',
'filter': bpf_filter or '',
'duration': duration,
'max_packets': 500,
'file': self._capture_file,
})
# Sign if secret available
from core.daemon import _load_daemon_secret, _sign_request
import secrets as _secrets
secret = _load_daemon_secret()
if secret:
nonce = f"{time.time()}:{_secrets.token_hex(8)}"
sig = _sign_request(payload.encode(), secret)
envelope = json.dumps({'payload': payload, 'sig': sig, 'nonce': nonce})
else:
envelope = payload
sock.sendall((envelope + '\n').encode())
data = b''
while True:
chunk = sock.recv(8192)
if not chunk:
break
data += chunk
if b'\n' in data:
break
sock.close()
if data:
result = json.loads(data.decode().strip())
if result.get('ok') and os.path.exists(self._capture_file):
packets = rdpcap(self._capture_file)
self._last_packets = packets
self._capture_stats['output_file'] = self._capture_file
self._capture_stats['packet_count'] = len(packets)
for pkt in packets:
self._packet_handler(pkt)
except Exception as e:
self._capture_stats['error'] = str(e)