AUTARCH v1.9 — remote monitoring, SSH manager, daemon, vault, cleanup

- Add Remote Monitoring Station with PIAP device profile system
- Add SSH/SSHD manager with fail2ban integration
- Add privileged daemon architecture for safe root operations
- Add encrypted vault, HAL memory, HAL auto-analyst
- Add network security suite, module creator, codex training
- Add start.sh launcher script and GTK3 desktop launcher
- Remove Output/ build artifacts, installer files, loose docs
- Update .gitignore for runtime data and build artifacts
- Update README for v1.9 with new launch method, screenshots, and features

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
SsSnake
2026-03-24 06:59:06 -07:00
parent 1092689f45
commit da53899f66
382 changed files with 15277 additions and 493964 deletions

View File

@@ -1,6 +1,7 @@
"""Chat and Agent API routes — Hal chat with Agent system for module creation."""
import json
import os
import threading
import time
import uuid
@@ -53,6 +54,14 @@ def chat():
if not message:
return jsonify({'error': 'No message provided'})
# Store in HAL's encrypted memory
try:
from core.hal_memory import get_hal_memory
mem = get_hal_memory()
mem.add('user', message, metadata={'mode': mode})
except Exception:
pass
if mode == 'agent':
return _handle_agent_chat(message)
else:
@@ -77,8 +86,16 @@ def _handle_direct_chat(message):
system_prompt = _get_system_prompt()
try:
token_gen = llm.chat(message, system_prompt=system_prompt, stream=True)
full_response = []
for token in token_gen:
full_response.append(token)
yield f"data: {json.dumps({'token': token})}\n\n"
# Store HAL's response in memory
try:
from core.hal_memory import get_hal_memory
get_hal_memory().add('hal', ''.join(full_response))
except Exception:
pass
except LLMError as e:
yield f"data: {json.dumps({'type': 'error', 'content': str(e)})}\n\n"
@@ -113,11 +130,27 @@ def _handle_agent_chat(message):
tools = get_tool_registry()
agent = Agent(llm=llm, tools=tools, max_steps=20, verbose=False)
# Inject system prompt into agent
system_prompt = _get_system_prompt()
agent.SYSTEM_PROMPT = system_prompt + "\n\n{tools_description}"
# Inject system prompt — keep the THOUGHT/ACTION/PARAMS format from Agent,
# prepend with our behavioral rules
hal_prompt = _get_system_prompt()
agent.SYSTEM_PROMPT = hal_prompt + """
FORMAT — you MUST use this exact format:
THOUGHT: your reasoning
ACTION: tool_name
PARAMS: {{"param": "value"}}
When done: ACTION: task_complete PARAMS: {{"summary": "what was done"}}
When you need input: ACTION: ask_user PARAMS: {{"question": "your question"}}
{tools_description}
"""
def on_step(step):
# Check stop signal
if stop_event.is_set():
return
if step.thought:
steps.append({'type': 'thought', 'content': step.thought})
if step.tool_name and step.tool_name not in ('task_complete', 'ask_user'):
@@ -135,6 +168,22 @@ def _handle_agent_chat(message):
else:
steps.append({'type': 'error', 'content': result.error or result.summary})
# Store agent conversation in HAL memory
try:
from core.hal_memory import get_hal_memory
mem = get_hal_memory()
for step in result.steps:
if step.thought:
mem.add('hal_thought', step.thought)
if step.tool_name:
mem.add('hal_action', f'{step.tool_name}({json.dumps(step.tool_args or {})})')
if step.tool_result:
mem.add('hal_result', step.tool_result[:2000])
mem.add('hal', result.summary if result.success else (result.error or result.summary))
mem.save()
except Exception:
pass
except Exception as e:
steps.append({'type': 'error', 'content': str(e)})
finally:
@@ -181,6 +230,143 @@ def chat_reset():
return jsonify({'ok': True})
@chat_bp.route('/hal/analyze', methods=['POST'])
@login_required
def hal_analyze():
    """Send tool output to HAL for AI analysis.

    Expects JSON: {tool_name, output, context?, category?}
    Returns JSON: {available, analysis, risk_level, has_fixes, tool_name}
    """
    payload = request.get_json(silent=True) or {}
    tool_name = payload.get('tool_name', 'unknown')
    raw_output = payload.get('output', '')
    # Nothing to analyze -- report unavailability instead of calling the LLM.
    if not raw_output:
        return jsonify({'available': False, 'analysis': 'No output provided', 'tool_name': tool_name})
    from core.hal_analyst import analyze_output
    analysis = analyze_output(
        tool_name,
        raw_output,
        context=payload.get('context', ''),
        category=payload.get('category', 'default'),
    )
    return jsonify(analysis)
@chat_bp.route('/hal/fix', methods=['POST'])
@login_required
def hal_fix():
    """Execute a fix command suggested by HAL.

    Expects JSON: {command: str}
    Returns JSON: {ok, output, exit_code}  (or {ok: False, error} on rejection)

    The command is sanitized (leading ``sudo`` and shell redirections removed),
    screened against a small deny-list, then routed either to the privileged
    daemon (``root_exec``) or to a plain subprocess for tools that must run as
    the normal user (adb/fastboot).  Pipelines run through ``bash -c``;
    ``&&``-chains run sequentially and stop at the first failure.
    """
    from core.daemon import root_exec
    import re
    import shlex
    import subprocess as _subprocess

    data = request.get_json(silent=True) or {}
    command = data.get('command', '').strip()
    if not command:
        return jsonify({'ok': False, 'error': 'No command provided'})

    # Safety: block obviously dangerous commands.
    # NOTE(review): this is a best-effort deny-list, not a sandbox -- it is
    # trivially bypassable and should not be the only privilege boundary.
    dangerous = ['rm -rf /', 'mkfs', 'dd if=', ':(){', 'format c:']
    for d in dangerous:
        if d in command.lower():
            return jsonify({'ok': False, 'error': f'Blocked dangerous command: {d}'})

    def _clean(cmd_str):
        """Strip shell redirections and a leading 'sudo ' from a command string."""
        cmd_str = re.sub(r'\s*2>/dev/null\s*', ' ', cmd_str)
        cmd_str = re.sub(r'\s*>/dev/null\s*', ' ', cmd_str)
        cmd_str = re.sub(r'\s*2>&1\s*', ' ', cmd_str)
        cmd_str = cmd_str.strip()
        if cmd_str.startswith('sudo '):
            cmd_str = cmd_str[5:].strip()
        return cmd_str

    command = _clean(command)
    # Guard: the whole command may have been redirections/sudo only.
    if not command:
        return jsonify({'ok': False, 'error': 'No command provided'})

    # Commands that should run as the normal user, not root.
    USER_COMMANDS = {'adb', 'fastboot'}

    def _is_user_cmd(cmd_str):
        """Check if a command should run as the normal user."""
        tokens = cmd_str.split()
        return bool(tokens) and os.path.basename(tokens[0]) in USER_COMMANDS

    def _run_user(cmd_parts, timeout=60):
        """Run a command as the normal user via subprocess; never raises."""
        try:
            result = _subprocess.run(
                cmd_parts, capture_output=True, text=True, timeout=timeout
            )
            return {
                'ok': result.returncode == 0,
                'stdout': result.stdout,
                'stderr': result.stderr,
                'code': result.returncode,
            }
        except _subprocess.TimeoutExpired:
            return {'ok': False, 'stdout': '', 'stderr': f'Timeout after {timeout}s', 'code': -2}
        except FileNotFoundError:
            return {'ok': False, 'stdout': '', 'stderr': f'Command not found: {cmd_parts[0]}', 'code': -3}
        except Exception as e:
            return {'ok': False, 'stdout': '', 'stderr': str(e), 'code': -4}

    def _exec(cmd_parts, timeout=60):
        """Route to user or root execution based on the command's binary."""
        if cmd_parts and os.path.basename(cmd_parts[0]) in USER_COMMANDS:
            return _run_user(cmd_parts, timeout=timeout)
        return root_exec(cmd_parts, timeout=timeout)

    # Pipelines (cmd1 | cmd2) need a real shell; route the whole line based
    # on its first command.
    if '|' in command:
        if _is_user_cmd(command):
            r = _run_user(['bash', '-c', command], timeout=60)
        else:
            r = root_exec(['bash', '-c', command], timeout=60)
        return jsonify({'ok': r['ok'], 'output': r['stdout'] + r['stderr'], 'exit_code': r['code']})

    # Chained commands (&&): run sequentially, stop at the first failure.
    if '&&' in command:
        parts = [c.strip() for c in command.split('&&') if c.strip()]
        all_output = ''
        for part in parts:
            # Apply the same sanitization as the top level (fixes the
            # previously-missing 2>&1 stripping for chained parts).
            part = _clean(part)
            if not part:
                continue
            try:
                cmd_parts = shlex.split(part)
            except ValueError:
                # Unbalanced quotes etc. -- fall back to naive whitespace split.
                cmd_parts = part.split()
            r = _exec(cmd_parts, timeout=60)
            all_output += r['stdout'] + r['stderr']
            if not r['ok']:
                return jsonify({'ok': False, 'output': all_output, 'exit_code': r['code']})
        return jsonify({'ok': True, 'output': all_output, 'exit_code': 0})

    # Single command.
    try:
        cmd_parts = shlex.split(command)
    except ValueError:
        cmd_parts = command.split()
    r = _exec(cmd_parts, timeout=60)
    return jsonify({
        'ok': r['ok'],
        'output': r['stdout'] + r['stderr'],
        'exit_code': r['code'],
    })
@chat_bp.route('/hal/available')
@login_required
def hal_available():
    """Quick check if HAL analysis is available (LLM loaded)."""
    from core.hal_analyst import is_llm_available
    status = {'available': is_llm_available()}
    return jsonify(status)
@chat_bp.route('/chat/status')
@login_required
def chat_status():